1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#if !defined(__wasm__)
10
11#include "assembly.h"
12
13#define FROM_0_TO_15 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
14#define FROM_16_TO_31 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
15
16#define FROM_0_TO_31 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
17#define FROM_32_TO_63 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63
18
19#if defined(_AIX)
20 .toc
21#elif defined(__aarch64__) && defined(__ELF__) && defined(_LIBUNWIND_EXECUTE_ONLY_CODE)
22 .section .text,"axy",@progbits,unique,0
23#else
24 .text
25#endif
26
27#if !defined(__USING_SJLJ_EXCEPTIONS__)
28
#if defined(__i386__)
.att_syntax

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# Snapshot the caller's register state into the unw_context_t whose address
# is passed on the stack (cdecl).  Segment registers and eflags are not
# captured (see the "skip" notes below).
#
# On entry:
#   +                       +
#   +-----------------------+
#   + thread_state pointer  +
#   +-----------------------+
#   + return address        +
#   +-----------------------+   <-- SP
#   +                       +
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)

  _LIBUNWIND_CET_ENDBR
  push %eax                  # free eax for use as the context pointer;
                             # its original value is re-stored below
  movl 8(%esp), %eax         # eax = thread_state (stack arg, now at +8)
  movl %ebx, 4(%eax)
  movl %ecx, 8(%eax)
  movl %edx, 12(%eax)
  movl %edi, 16(%eax)
  movl %esi, 20(%eax)
  movl %ebp, 24(%eax)
  movl %esp, %edx
  addl $8, %edx              # +8 skips the push above and the return address
  movl %edx, 28(%eax)        # store what sp was at call site as esp
  # skip ss
  # skip eflags
  movl 4(%esp), %edx
  movl %edx, 40(%eax)        # store return address as eip
  # skip cs
  # skip ds
  # skip es
  # skip fs
  # skip gs
  movl (%esp), %edx          # reload the eax value pushed on entry
  movl %edx, (%eax)          # store original eax
  popl %eax
  xorl %eax, %eax            # return UNW_ESUCCESS
  ret
72
#elif defined(__arm64ec__)

//
// extern int __unw_getcontext(unw_context_t* thread_state)
//
// On entry:
//  thread_state pointer is in x0
//
// ARM64EC runs AArch64 code against the x86_64 context layout; each store's
// trailing comment names the emulated x86_64 register that the AArch64
// register maps to in the context structure.
//
  .section .text,"xr",discard,"#__unw_getcontext"
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION("#__unw_getcontext")
  stp x8, x27, [x0, #0x000]  // rax, rbx
  stp x0, x1,  [x0, #0x010]  // rcx, rdx
  stp x26,x25, [x0, #0x020]  // rdi, rsi
  mov x1, sp
  stp fp, x1,  [x0, #0x030]  // rbp, rsp
  stp x2, x3,  [x0, #0x040]  // r8, r9
  stp x4, x5,  [x0, #0x050]  // r10, r11
  stp x19,x20, [x0, #0x060]  // r12, r13
  stp x21,x22, [x0, #0x070]  // r14, r15
  str x30,     [x0, #0x080]  // store return address as pc
  stp q0, q1,  [x0, #0x0b0]  // xmm0, xmm1
  stp q2, q3,  [x0, #0x0d0]  // xmm2, xmm3
  stp q4, q5,  [x0, #0x0f0]  // xmm4, xmm5
  stp q6, q7,  [x0, #0x110]  // xmm6, xmm7
  stp q8, q9,  [x0, #0x130]  // xmm8, xmm9
  stp q10,q11, [x0, #0x150]  // xmm10,xmm11
  stp q12,q13, [x0, #0x170]  // xmm12,xmm13
  stp q14,q15, [x0, #0x190]  // xmm14,xmm15
  mov x0, #0                 // return UNW_ESUCCESS
  ret

  // Make the unmangled name an anti-dependency alias of the "#"-mangled
  // ARM64EC symbol.
  .weak_anti_dep __unw_getcontext
  .set __unw_getcontext, "#__unw_getcontext"

  // Hybrid metadata: associate the entry thunk so emulated x64 callers can
  // enter this function.
  .section .hybmp$x,"yi"
  .symidx "#__unw_getcontext"
  .symidx $ientry_thunk$cdecl$i8$i8
  .word 1
  .text
113
#elif defined(__x86_64__)
.att_syntax

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in rdi
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
# The context pointer arrives in the first integer-argument register of the
# active ABI (rcx on Windows x64, rdi on SysV); TMP is a volatile scratch
# register in each ABI.
#if defined(_WIN64)
#define PTR %rcx
#define TMP %rdx
#else
#define PTR %rdi
#define TMP %rsi
#endif

  _LIBUNWIND_CET_ENDBR
  movq  %rax,   (PTR)
  movq  %rbx,  8(PTR)
  movq  %rcx, 16(PTR)
  movq  %rdx, 24(PTR)
  movq  %rdi, 32(PTR)
  movq  %rsi, 40(PTR)
  movq  %rbp, 48(PTR)
  movq  %rsp, 56(PTR)
  addq  $8,   56(PTR)        # saved rsp = sp at the call site (skip the
                             # return address pushed by the call)
  movq  %r8,  64(PTR)
  movq  %r9,  72(PTR)
  movq  %r10, 80(PTR)
  movq  %r11, 88(PTR)
  movq  %r12, 96(PTR)
  movq  %r13,104(PTR)
  movq  %r14,112(PTR)
  movq  %r15,120(PTR)
  movq  (%rsp),TMP
  movq  TMP,128(PTR) # store return address as rip
  # skip rflags
  # skip cs
  # skip fs
  # skip gs

#if defined(_WIN64)
  # On Windows x64, xmm6-xmm15 are callee-saved, so the xmm set is part of
  # the saved context.
  movdqu %xmm0,176(PTR)
  movdqu %xmm1,192(PTR)
  movdqu %xmm2,208(PTR)
  movdqu %xmm3,224(PTR)
  movdqu %xmm4,240(PTR)
  movdqu %xmm5,256(PTR)
  movdqu %xmm6,272(PTR)
  movdqu %xmm7,288(PTR)
  movdqu %xmm8,304(PTR)
  movdqu %xmm9,320(PTR)
  movdqu %xmm10,336(PTR)
  movdqu %xmm11,352(PTR)
  movdqu %xmm12,368(PTR)
  movdqu %xmm13,384(PTR)
  movdqu %xmm14,400(PTR)
  movdqu %xmm15,416(PTR)
#endif
  xorl %eax, %eax    # return UNW_ESUCCESS
  ret
177
#elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in a0 ($4)
#
# Context layout: GPR $n at word slot n, pc at slot 32, hi/lo at 33/34,
# FPRs starting at byte offset 4*36.
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  .set push
  .set noat        # allow explicit use of $1 ($at)
  .set noreorder   # delay slots are filled manually below
  .set nomacro
  sw    $1, (4 * 1)($4)
  sw    $2, (4 * 2)($4)
  sw    $3, (4 * 3)($4)
  sw    $4, (4 * 4)($4)
  sw    $5, (4 * 5)($4)
  sw    $6, (4 * 6)($4)
  sw    $7, (4 * 7)($4)
  sw    $8, (4 * 8)($4)
  sw    $9, (4 * 9)($4)
  sw    $10, (4 * 10)($4)
  sw    $11, (4 * 11)($4)
  sw    $12, (4 * 12)($4)
  sw    $13, (4 * 13)($4)
  sw    $14, (4 * 14)($4)
  sw    $15, (4 * 15)($4)
  sw    $16, (4 * 16)($4)
  sw    $17, (4 * 17)($4)
  sw    $18, (4 * 18)($4)
  sw    $19, (4 * 19)($4)
  sw    $20, (4 * 20)($4)
  sw    $21, (4 * 21)($4)
  sw    $22, (4 * 22)($4)
  sw    $23, (4 * 23)($4)
  sw    $24, (4 * 24)($4)
  sw    $25, (4 * 25)($4)
  sw    $26, (4 * 26)($4)
  sw    $27, (4 * 27)($4)
  sw    $28, (4 * 28)($4)
  sw    $29, (4 * 29)($4)
  sw    $30, (4 * 30)($4)
  sw    $31, (4 * 31)($4)
  # Store return address to pc
  sw    $31, (4 * 32)($4)
#if __mips_isa_rev < 6
  # hi and lo (removed in MIPS32r6, hence the guard)
  mfhi  $8
  sw    $8,  (4 * 33)($4)
  mflo  $8
  sw    $8,  (4 * 34)($4)
#endif
#ifdef __mips_hard_float
#if __mips_fpr != 64
  # FR=0 mode: only even-numbered FPRs exist as 64-bit doubles.
  sdc1  $f0, (4 * 36 + 8 * 0)($4)
  sdc1  $f2, (4 * 36 + 8 * 2)($4)
  sdc1  $f4, (4 * 36 + 8 * 4)($4)
  sdc1  $f6, (4 * 36 + 8 * 6)($4)
  sdc1  $f8, (4 * 36 + 8 * 8)($4)
  sdc1  $f10, (4 * 36 + 8 * 10)($4)
  sdc1  $f12, (4 * 36 + 8 * 12)($4)
  sdc1  $f14, (4 * 36 + 8 * 14)($4)
  sdc1  $f16, (4 * 36 + 8 * 16)($4)
  sdc1  $f18, (4 * 36 + 8 * 18)($4)
  sdc1  $f20, (4 * 36 + 8 * 20)($4)
  sdc1  $f22, (4 * 36 + 8 * 22)($4)
  sdc1  $f24, (4 * 36 + 8 * 24)($4)
  sdc1  $f26, (4 * 36 + 8 * 26)($4)
  sdc1  $f28, (4 * 36 + 8 * 28)($4)
  sdc1  $f30, (4 * 36 + 8 * 30)($4)
#else
  # FR=1 mode: all 32 FPRs are 64 bits wide.
  sdc1  $f0, (4 * 36 + 8 * 0)($4)
  sdc1  $f1, (4 * 36 + 8 * 1)($4)
  sdc1  $f2, (4 * 36 + 8 * 2)($4)
  sdc1  $f3, (4 * 36 + 8 * 3)($4)
  sdc1  $f4, (4 * 36 + 8 * 4)($4)
  sdc1  $f5, (4 * 36 + 8 * 5)($4)
  sdc1  $f6, (4 * 36 + 8 * 6)($4)
  sdc1  $f7, (4 * 36 + 8 * 7)($4)
  sdc1  $f8, (4 * 36 + 8 * 8)($4)
  sdc1  $f9, (4 * 36 + 8 * 9)($4)
  sdc1  $f10, (4 * 36 + 8 * 10)($4)
  sdc1  $f11, (4 * 36 + 8 * 11)($4)
  sdc1  $f12, (4 * 36 + 8 * 12)($4)
  sdc1  $f13, (4 * 36 + 8 * 13)($4)
  sdc1  $f14, (4 * 36 + 8 * 14)($4)
  sdc1  $f15, (4 * 36 + 8 * 15)($4)
  sdc1  $f16, (4 * 36 + 8 * 16)($4)
  sdc1  $f17, (4 * 36 + 8 * 17)($4)
  sdc1  $f18, (4 * 36 + 8 * 18)($4)
  sdc1  $f19, (4 * 36 + 8 * 19)($4)
  sdc1  $f20, (4 * 36 + 8 * 20)($4)
  sdc1  $f21, (4 * 36 + 8 * 21)($4)
  sdc1  $f22, (4 * 36 + 8 * 22)($4)
  sdc1  $f23, (4 * 36 + 8 * 23)($4)
  sdc1  $f24, (4 * 36 + 8 * 24)($4)
  sdc1  $f25, (4 * 36 + 8 * 25)($4)
  sdc1  $f26, (4 * 36 + 8 * 26)($4)
  sdc1  $f27, (4 * 36 + 8 * 27)($4)
  sdc1  $f28, (4 * 36 + 8 * 28)($4)
  sdc1  $f29, (4 * 36 + 8 * 29)($4)
  sdc1  $f30, (4 * 36 + 8 * 30)($4)
  sdc1  $f31, (4 * 36 + 8 * 31)($4)
#endif
#endif
  jr    $31
  # return UNW_ESUCCESS (executes in the jr delay slot)
  or    $2, $0, $0
  .set pop
288
#elif defined(__mips64)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in a0 ($4)
#
# Context layout: GPR $n at doubleword slot n, pc at slot 32, hi/lo at
# 33/34, FPRs starting at byte offset 280.
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  .set push
  .set noat        # allow explicit use of $1 ($at)
  .set noreorder   # delay slots are filled manually below
  .set nomacro
  # Save GPRs $1..$31 via assembler-time loop expansion.
  .irp i,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
  sd $\i, (8 * \i)($4)
  .endr
  # Store return address to pc
  sd    $31, (8 * 32)($4)
#if __mips_isa_rev < 6
  # hi and lo (removed in MIPS64r6, hence the guard)
  mfhi  $8
  sd    $8,  (8 * 33)($4)
  mflo  $8
  sd    $8,  (8 * 34)($4)
#endif
#ifdef __mips_hard_float
  .irp i,FROM_0_TO_31
  sdc1 $f\i, (280+8*\i)($4)
  .endr
#endif
  jr    $31
  # return UNW_ESUCCESS (executes in the jr delay slot)
  or    $2, $0, $0
  .set pop
323
# elif defined(__mips__)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# Just trap for the time being: remaining MIPS ABI variants (e.g. n32) are
# not implemented, so raise an unconditional trap ($0 == $0 is always true).
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  teq $0, $0
332
#elif defined(__powerpc64__)

//
// extern int __unw_getcontext(unw_context_t* thread_state)
//
// On entry:
//  thread_state pointer is in r3
//
#if defined(_AIX)
DEFINE_LIBUNWIND_FUNCTION_AND_WEAK_ALIAS(__unw_getcontext, unw_getcontext)
#else
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
#endif
// store register (GPR); GPR n lives at context slot n + 2 (the first two
// doublewords hold srr0/srr1 — see PPC64_OFFS_SRR0 below)
#define PPC64_STR(n) \
  std   n, (8 * (n + 2))(3)

  // save GPRs
  PPC64_STR(0)
  mflr  0
  std   0, PPC64_OFFS_SRR0(3) // store lr as ssr0
  PPC64_STR(1)
  PPC64_STR(4)  // Save r4 first since it will be used for fixing r2.
#if defined(_AIX)
  // The TOC register (r2) was changed by the glue code if unw_getcontext
  // is called from a different module. Save the original TOC register
  // in the context if this is the case.
  mflr  4
  lwz   4, 0(4)       // Get the first instruction at the return address.
  xoris 0, 4, 0xe841  // Is it reloading the TOC register "ld 2,40(1)"?
  cmplwi 0, 0x28
  bne   0, LnoR2Fix   // No need to fix up r2 if it is not.
  ld    2, 40(1)      // Use the saved TOC register in the stack.
LnoR2Fix:
#endif
  PPC64_STR(2)
  PPC64_STR(3)
  PPC64_STR(5)
  PPC64_STR(6)
  PPC64_STR(7)
  PPC64_STR(8)
  PPC64_STR(9)
  PPC64_STR(10)
  PPC64_STR(11)
  PPC64_STR(12)
  PPC64_STR(13)
  PPC64_STR(14)
  PPC64_STR(15)
  PPC64_STR(16)
  PPC64_STR(17)
  PPC64_STR(18)
  PPC64_STR(19)
  PPC64_STR(20)
  PPC64_STR(21)
  PPC64_STR(22)
  PPC64_STR(23)
  PPC64_STR(24)
  PPC64_STR(25)
  PPC64_STR(26)
  PPC64_STR(27)
  PPC64_STR(28)
  PPC64_STR(29)
  PPC64_STR(30)
  PPC64_STR(31)

  // save special registers
  mfcr  0
  std   0, PPC64_OFFS_CR(3)
  mfxer 0
  std   0, PPC64_OFFS_XER(3)
#if defined(_AIX)
  // LR value saved from the register is not used, initialize it to 0.
  li    0, 0
#else
  mflr  0
#endif
  std   0, PPC64_OFFS_LR(3)
  mfctr 0
  std   0, PPC64_OFFS_CTR(3)
  mfvrsave 0
  std   0, PPC64_OFFS_VRSAVE(3)

#if defined(__VSX__)
  // save VS registers
  // (note that this also saves floating point registers and V registers,
  // because part of VS is mapped to these registers)

  // r4 walks the FP/VS area of the context; each PPC64_STVS advances it.
  addi  4, 3, PPC64_OFFS_FP

// store VS register
#ifdef __LITTLE_ENDIAN__
// For little-endian targets, we need a swap since stxvd2x will store the
// register in the incorrect doubleword order.
// FIXME: when supporting targets older than Power9 on LE is no longer required
//        this can be changed to simply `stxv n, 16 * n(4)`.
#define PPC64_STVS(n)      \
  xxswapd n, n            ;\
  stxvd2x n, 0, 4         ;\
  addi    4, 4, 16
#else
#define PPC64_STVS(n)      \
  stxvd2x n, 0, 4         ;\
  addi    4, 4, 16
#endif

  PPC64_STVS(0)
  PPC64_STVS(1)
  PPC64_STVS(2)
  PPC64_STVS(3)
  PPC64_STVS(4)
  PPC64_STVS(5)
  PPC64_STVS(6)
  PPC64_STVS(7)
  PPC64_STVS(8)
  PPC64_STVS(9)
  PPC64_STVS(10)
  PPC64_STVS(11)
  PPC64_STVS(12)
  PPC64_STVS(13)
  PPC64_STVS(14)
  PPC64_STVS(15)
  PPC64_STVS(16)
  PPC64_STVS(17)
  PPC64_STVS(18)
  PPC64_STVS(19)
  PPC64_STVS(20)
  PPC64_STVS(21)
  PPC64_STVS(22)
  PPC64_STVS(23)
  PPC64_STVS(24)
  PPC64_STVS(25)
  PPC64_STVS(26)
  PPC64_STVS(27)
  PPC64_STVS(28)
  PPC64_STVS(29)
  PPC64_STVS(30)
  PPC64_STVS(31)
  PPC64_STVS(32)
  PPC64_STVS(33)
  PPC64_STVS(34)
  PPC64_STVS(35)
  PPC64_STVS(36)
  PPC64_STVS(37)
  PPC64_STVS(38)
  PPC64_STVS(39)
  PPC64_STVS(40)
  PPC64_STVS(41)
  PPC64_STVS(42)
  PPC64_STVS(43)
  PPC64_STVS(44)
  PPC64_STVS(45)
  PPC64_STVS(46)
  PPC64_STVS(47)
  PPC64_STVS(48)
  PPC64_STVS(49)
  PPC64_STVS(50)
  PPC64_STVS(51)
  PPC64_STVS(52)
  PPC64_STVS(53)
  PPC64_STVS(54)
  PPC64_STVS(55)
  PPC64_STVS(56)
  PPC64_STVS(57)
  PPC64_STVS(58)
  PPC64_STVS(59)
  PPC64_STVS(60)
  PPC64_STVS(61)
  PPC64_STVS(62)
  PPC64_STVS(63)

#else

// store FP register
#define PPC64_STF(n) \
  stfd  n, (PPC64_OFFS_FP + n * 16)(3)

  // save float registers
  PPC64_STF(0)
  PPC64_STF(1)
  PPC64_STF(2)
  PPC64_STF(3)
  PPC64_STF(4)
  PPC64_STF(5)
  PPC64_STF(6)
  PPC64_STF(7)
  PPC64_STF(8)
  PPC64_STF(9)
  PPC64_STF(10)
  PPC64_STF(11)
  PPC64_STF(12)
  PPC64_STF(13)
  PPC64_STF(14)
  PPC64_STF(15)
  PPC64_STF(16)
  PPC64_STF(17)
  PPC64_STF(18)
  PPC64_STF(19)
  PPC64_STF(20)
  PPC64_STF(21)
  PPC64_STF(22)
  PPC64_STF(23)
  PPC64_STF(24)
  PPC64_STF(25)
  PPC64_STF(26)
  PPC64_STF(27)
  PPC64_STF(28)
  PPC64_STF(29)
  PPC64_STF(30)
  PPC64_STF(31)

#if defined(__ALTIVEC__)
  // save vector registers

  // Use 16-bytes below the stack pointer as an
  // aligned buffer to save each vector register.
  // Note that the stack pointer is always 16-byte aligned.
  subi  4, 1, 16

// stvx requires a 16-byte-aligned address, so bounce each vector through
// the aligned scratch buffer and copy it into the (possibly unaligned)
// context slot as two doublewords.
#define PPC64_STV_UNALIGNED(n)             \
  stvx  n, 0, 4                           ;\
  ld    5, 0(4)                           ;\
  std   5, (PPC64_OFFS_V + n * 16)(3)     ;\
  ld    5, 8(4)                           ;\
  std   5, (PPC64_OFFS_V + n * 16 + 8)(3)

  PPC64_STV_UNALIGNED(0)
  PPC64_STV_UNALIGNED(1)
  PPC64_STV_UNALIGNED(2)
  PPC64_STV_UNALIGNED(3)
  PPC64_STV_UNALIGNED(4)
  PPC64_STV_UNALIGNED(5)
  PPC64_STV_UNALIGNED(6)
  PPC64_STV_UNALIGNED(7)
  PPC64_STV_UNALIGNED(8)
  PPC64_STV_UNALIGNED(9)
  PPC64_STV_UNALIGNED(10)
  PPC64_STV_UNALIGNED(11)
  PPC64_STV_UNALIGNED(12)
  PPC64_STV_UNALIGNED(13)
  PPC64_STV_UNALIGNED(14)
  PPC64_STV_UNALIGNED(15)
  PPC64_STV_UNALIGNED(16)
  PPC64_STV_UNALIGNED(17)
  PPC64_STV_UNALIGNED(18)
  PPC64_STV_UNALIGNED(19)
  PPC64_STV_UNALIGNED(20)
  PPC64_STV_UNALIGNED(21)
  PPC64_STV_UNALIGNED(22)
  PPC64_STV_UNALIGNED(23)
  PPC64_STV_UNALIGNED(24)
  PPC64_STV_UNALIGNED(25)
  PPC64_STV_UNALIGNED(26)
  PPC64_STV_UNALIGNED(27)
  PPC64_STV_UNALIGNED(28)
  PPC64_STV_UNALIGNED(29)
  PPC64_STV_UNALIGNED(30)
  PPC64_STV_UNALIGNED(31)

#endif
#endif

  li    3, 0  // return UNW_ESUCCESS
  blr
595
596
#elif defined(__powerpc__)

//
// extern int unw_getcontext(unw_context_t* thread_state)
//
// On entry:
//  thread_state pointer is in r3
//
// Context layout (byte offsets): srr0 at 0, r0 at 8, then GPR n at
// 8 + 4*n; CR at 136, LR at 144, CTR at 148, VRSAVE at 156; FPRs from
// 160; vector registers from 424.
//
#if defined(_AIX)
DEFINE_LIBUNWIND_FUNCTION_AND_WEAK_ALIAS(__unw_getcontext, unw_getcontext)
#else
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
#endif
  stw   0,   8(3)
  mflr  0
  stw   0,   0(3) // store lr as ssr0
  stw   1,  12(3)
  stw   4,  24(3) // Save r4 first since it will be used for fixing r2.
#if defined(_AIX)
  // The TOC register (r2) was changed by the glue code if unw_getcontext
  // is called from a different module. Save the original TOC register
  // in the context if this is the case.
  mflr  4
  lwz   4, 0(4)      // Get the instruction at the return address.
  xoris 0, 4, 0x8041 // Is it reloading the TOC register "lwz 2,20(1)"?
  cmplwi 0, 0x14
  bne   0, LnoR2Fix  // No need to fix up r2 if it is not.
  lwz   2, 20(1)     // Use the saved TOC register in the stack.
LnoR2Fix:
#endif
  stw   2,  16(3)
  stw   3,  20(3)
  stw   5,  28(3)
  stw   6,  32(3)
  stw   7,  36(3)
  stw   8,  40(3)
  stw   9,  44(3)
  stw  10,  48(3)
  stw  11,  52(3)
  stw  12,  56(3)
  stw  13,  60(3)
  stw  14,  64(3)
  stw  15,  68(3)
  stw  16,  72(3)
  stw  17,  76(3)
  stw  18,  80(3)
  stw  19,  84(3)
  stw  20,  88(3)
  stw  21,  92(3)
  stw  22,  96(3)
  stw  23, 100(3)
  stw  24, 104(3)
  stw  25, 108(3)
  stw  26, 112(3)
  stw  27, 116(3)
  stw  28, 120(3)
  stw  29, 124(3)
  stw  30, 128(3)
  stw  31, 132(3)

#if defined(__ALTIVEC__)
  // save VRSave register (SPR 256)
  mfspr 0, 256
  stw   0, 156(3)
#endif
  // save CR registers
  mfcr  0
  stw   0, 136(3)
#if defined(_AIX)
  // LR value from the register is not used, initialize it to 0.
  li    0, 0
  stw   0, 144(3)
#endif
  // save CTR register
  mfctr 0
  stw   0, 148(3)

#if !defined(__NO_FPRS__)
  // save float registers
  stfd  0, 160(3)
  stfd  1, 168(3)
  stfd  2, 176(3)
  stfd  3, 184(3)
  stfd  4, 192(3)
  stfd  5, 200(3)
  stfd  6, 208(3)
  stfd  7, 216(3)
  stfd  8, 224(3)
  stfd  9, 232(3)
  stfd 10, 240(3)
  stfd 11, 248(3)
  stfd 12, 256(3)
  stfd 13, 264(3)
  stfd 14, 272(3)
  stfd 15, 280(3)
  stfd 16, 288(3)
  stfd 17, 296(3)
  stfd 18, 304(3)
  stfd 19, 312(3)
  stfd 20, 320(3)
  stfd 21, 328(3)
  stfd 22, 336(3)
  stfd 23, 344(3)
  stfd 24, 352(3)
  stfd 25, 360(3)
  stfd 26, 368(3)
  stfd 27, 376(3)
  stfd 28, 384(3)
  stfd 29, 392(3)
  stfd 30, 400(3)
  stfd 31, 408(3)
#endif

#if defined(__ALTIVEC__)
  // save vector registers

  subi  4, 1, 16
  rlwinm 4, 4, 0, 0, 27 // mask low 4-bits
  // r4 is now a 16-byte aligned pointer into the red zone

// stvx requires 16-byte alignment, so bounce each vector register through
// the aligned red-zone buffer and copy it word-by-word into the context.
#define SAVE_VECTOR_UNALIGNED(_vec, _offset) \
  stvx  _vec, 0, 4            SEPARATOR      \
  lwz   5, 0(4)               SEPARATOR      \
  stw   5, _offset(3)         SEPARATOR      \
  lwz   5, 4(4)               SEPARATOR      \
  stw   5, _offset+4(3)       SEPARATOR      \
  lwz   5, 8(4)               SEPARATOR      \
  stw   5, _offset+8(3)       SEPARATOR      \
  lwz   5, 12(4)              SEPARATOR      \
  stw   5, _offset+12(3)

  SAVE_VECTOR_UNALIGNED( 0, 424+0x000)
  SAVE_VECTOR_UNALIGNED( 1, 424+0x010)
  SAVE_VECTOR_UNALIGNED( 2, 424+0x020)
  SAVE_VECTOR_UNALIGNED( 3, 424+0x030)
  SAVE_VECTOR_UNALIGNED( 4, 424+0x040)
  SAVE_VECTOR_UNALIGNED( 5, 424+0x050)
  SAVE_VECTOR_UNALIGNED( 6, 424+0x060)
  SAVE_VECTOR_UNALIGNED( 7, 424+0x070)
  SAVE_VECTOR_UNALIGNED( 8, 424+0x080)
  SAVE_VECTOR_UNALIGNED( 9, 424+0x090)
  SAVE_VECTOR_UNALIGNED(10, 424+0x0A0)
  SAVE_VECTOR_UNALIGNED(11, 424+0x0B0)
  SAVE_VECTOR_UNALIGNED(12, 424+0x0C0)
  SAVE_VECTOR_UNALIGNED(13, 424+0x0D0)
  SAVE_VECTOR_UNALIGNED(14, 424+0x0E0)
  SAVE_VECTOR_UNALIGNED(15, 424+0x0F0)
  SAVE_VECTOR_UNALIGNED(16, 424+0x100)
  SAVE_VECTOR_UNALIGNED(17, 424+0x110)
  SAVE_VECTOR_UNALIGNED(18, 424+0x120)
  SAVE_VECTOR_UNALIGNED(19, 424+0x130)
  SAVE_VECTOR_UNALIGNED(20, 424+0x140)
  SAVE_VECTOR_UNALIGNED(21, 424+0x150)
  SAVE_VECTOR_UNALIGNED(22, 424+0x160)
  SAVE_VECTOR_UNALIGNED(23, 424+0x170)
  SAVE_VECTOR_UNALIGNED(24, 424+0x180)
  SAVE_VECTOR_UNALIGNED(25, 424+0x190)
  SAVE_VECTOR_UNALIGNED(26, 424+0x1A0)
  SAVE_VECTOR_UNALIGNED(27, 424+0x1B0)
  SAVE_VECTOR_UNALIGNED(28, 424+0x1C0)
  SAVE_VECTOR_UNALIGNED(29, 424+0x1D0)
  SAVE_VECTOR_UNALIGNED(30, 424+0x1E0)
  SAVE_VECTOR_UNALIGNED(31, 424+0x1F0)
#endif

  li    3, 0  // return UNW_ESUCCESS
  blr
764
765
#elif defined(__aarch64__)

#ifndef __has_feature
#define __has_feature(__feature) 0
#endif

//
// extern int __unw_getcontext(unw_context_t* thread_state)
//
// On entry:
//  thread_state pointer is in x0
//
// GPR xN is stored at offset 8*N; sp at 0xF8, pc at 0x100; the low 64 bits
// (dN) of the SIMD registers follow from 0x110.
//
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)

#if __has_feature(ptrauth_calls)
  // Sign the return address with the B key; matched by retab below.
  pacibsp
#endif

  stp    x0, x1,  [x0, #0x000]
  stp    x2, x3,  [x0, #0x010]
  stp    x4, x5,  [x0, #0x020]
  stp    x6, x7,  [x0, #0x030]
  stp    x8, x9,  [x0, #0x040]
  stp    x10,x11, [x0, #0x050]
  stp    x12,x13, [x0, #0x060]
  stp    x14,x15, [x0, #0x070]
  stp    x16,x17, [x0, #0x080]
  stp    x18,x19, [x0, #0x090]
  stp    x20,x21, [x0, #0x0A0]
  stp    x22,x23, [x0, #0x0B0]
  stp    x24,x25, [x0, #0x0C0]
  stp    x26,x27, [x0, #0x0D0]
  stp    x28,x29, [x0, #0x0E0]
  str    x30,     [x0, #0x0F0]
  mov    x1,sp
  str    x1,      [x0, #0x0F8]
  str    x30,     [x0, #0x100]    // store return address as pc
  // skip cpsr
#if defined(__ARM_FP) && __ARM_FP != 0
  stp    d0, d1,  [x0, #0x110]
  stp    d2, d3,  [x0, #0x120]
  stp    d4, d5,  [x0, #0x130]
  stp    d6, d7,  [x0, #0x140]
  stp    d8, d9,  [x0, #0x150]
  stp    d10,d11, [x0, #0x160]
  stp    d12,d13, [x0, #0x170]
  stp    d14,d15, [x0, #0x180]
  stp    d16,d17, [x0, #0x190]
  stp    d18,d19, [x0, #0x1A0]
  stp    d20,d21, [x0, #0x1B0]
  stp    d22,d23, [x0, #0x1C0]
  stp    d24,d25, [x0, #0x1D0]
  stp    d26,d27, [x0, #0x1E0]
  stp    d28,d29, [x0, #0x1F0]
  str    d30,     [x0, #0x200]
  str    d31,     [x0, #0x208]
#endif
  mov    x0, #0                   // return UNW_ESUCCESS

#if __has_feature(ptrauth_calls)
  retab                           // authenticate (B key) and return
#else
  ret
#endif
831
832//
833// extern "C" int64_t __libunwind_Registers_arm64_za_disable()
834//
835// This function implements the requirements of the __arm_za_disable ABI
836// routine, except that it will not abort; it will return a non-zero value
837// to signify the routine failed.
838//
839// Note: This function uses SME instructions. It must only be called if SME
840// has been confirmed to be available.
841//
842// On return:
843//
844// A status is placed in x0. A zero value indicates success; any non-zero
845// value indicates failure.
846//
847 .p2align 2
848DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_arm64_za_disable)
849 .variant_pcs __libunwind_Registers_arm64_za_disable
850#if __has_feature(ptrauth_calls)
851 pacibsp
852#endif
853 // If TPIDR2_EL0 is null, the subroutine just disables ZA.
854 .inst 0xd53bd0b0 // mrs x16, TPIDR2_EL0
855 cbz x16, 1f
856
857 // If any of the reserved bytes in the first 16 bytes of the TPIDR2 block are
858 // nonzero, return a non-zero value (libunwind will then abort).
859 ldrh w0, [x16, #10]
860 cbnz w0, 2f
861 ldr w0, [x16, #12]
862 cbnz w0, 2f
863
864 // If num_za_save_slices is zero, the subroutine just disables ZA.
865 ldrh w0, [x16, #8]
866 cbz x0, 1f
867
868 // If za_save_buffer is NULL, the subroutine just disables ZA.
869 ldr x16, [x16]
870 cbz x16, 1f
871
872 // Store ZA to za_save_buffer.
873 mov x15, xzr
8740:
875 .inst 0xe1206200 // str za[w15,0], [x16]
876 .inst 0x04305830 // addsvl x16, x16, #1
877 add x15, x15, #1
878 cmp x0, x15
879 b.ne 0b
8801:
881 // * Set TPIDR2_EL0 to null.
882 .inst 0xd51bd0bf // msr TPIDR2_EL0, xzr
883 // * Set PSTATE.ZA to 0.
884 .inst 0xd503447f // smstop za
885 // * Return zero (success)
886 mov x0, xzr
8872:
888#if __has_feature(ptrauth_calls)
889 retab
890#else
891 ret
892#endif
893
#elif defined(__arm__) && !defined(__APPLE__)

#if !defined(__ARM_ARCH_ISA_ARM)
#if (__ARM_ARCH_ISA_THUMB == 2)
  .syntax unified
#endif
  .thumb
#endif

@
@  extern int __unw_getcontext(unw_context_t* thread_state)
@
@  On entry:
@   thread_state pointer is in r0
@
@  Per EHABI #4.7 this only saves the core integer registers.
@  EHABI #7.4.5 notes that in general all VRS registers should be restored
@  however this is very hard to do for VFP registers because it is unknown
@  to the library how many registers are implemented by the architecture.
@  Instead, VFP registers are demand saved by logic external to __unw_getcontext.
@
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
#if !defined(__ARM_ARCH_ISA_ARM) && __ARM_ARCH_ISA_THUMB == 1
  @ Thumb-1 stm can only store r0-r7, so save the high registers by
  @ shuffling them through low registers.
  stm r0!, {r0-r7}
  mov r1, r8
  mov r2, r9
  mov r3, r10
  stm r0!, {r1-r3}
  mov r1, r11
  mov r2, sp
  mov r3, lr
  str r1, [r0, #0]   @ r11
  @ r12 does not need storing, it it the intra-procedure-call scratch register
  str r2, [r0, #8]   @ sp
  str r3, [r0, #12]  @ lr
  str r3, [r0, #16]  @ store return address as pc
  @ T1 does not have a non-cpsr-clobbering register-zeroing instruction.
  @ It is safe to use here though because we are about to return, and cpsr is
  @ not expected to be preserved.
  movs r0, #0        @ return UNW_ESUCCESS
#else
  @ 32bit thumb-2 restrictions for stm:
  @ . the sp (r13) cannot be in the list
  @ . the pc (r15) cannot be in the list in an STM instruction
  stm r0, {r0-r12}
  str sp, [r0, #52]
  str lr, [r0, #56]
  str lr, [r0, #60]  @ store return address as pc
  mov r0, #0         @ return UNW_ESUCCESS
#endif
  JMP(lr)
946
947@
948@ static void libunwind::Registers_arm::saveVFPWithFSTMD(unw_fpreg_t* values)
949@
950@ On entry:
951@ values pointer is in r0
952@
953 .p2align 2
954#if defined(__ELF__)
955 .fpu vfpv3-d16
956#endif
957DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMDEPv)
958 vstmia r0, {d0-d15}
959 JMP(lr)
960
961@
962@ static void libunwind::Registers_arm::saveVFPWithFSTMX(unw_fpreg_t* values)
963@
964@ On entry:
965@ values pointer is in r0
966@
967 .p2align 2
968#if defined(__ELF__)
969 .fpu vfpv3-d16
970#endif
971DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMXEPv)
972 vstmia r0, {d0-d15} @ fstmiax is deprecated in ARMv7+ and now behaves like vstmia
973 JMP(lr)
974
975@
976@ static void libunwind::Registers_arm::saveVFPv3(unw_fpreg_t* values)
977@
978@ On entry:
979@ values pointer is in r0
980@
981 .p2align 2
982#if defined(__ELF__)
983 .fpu vfpv3
984#endif
985DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm9saveVFPv3EPv)
986 @ VFP and iwMMX instructions are only available when compiling with the flags
987 @ that enable them. We do not want to do that in the library (because we do not
988 @ want the compiler to generate instructions that access those) but this is
989 @ only accessed if the personality routine needs these registers. Use of
990 @ these registers implies they are, actually, available on the target, so
991 @ it's ok to execute.
992 @ So, generate the instructions using the corresponding coprocessor mnemonic.
993 vstmia r0, {d16-d31}
994 JMP(lr)
995
#if defined(_LIBUNWIND_ARM_WMMX)

@
@ static void libunwind::Registers_arm::saveiWMMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
@ The iWMMX registers are accessed via their generic coprocessor (p1)
@ encodings so this assembles without iWMMX assembler support; the trailing
@ comments give the equivalent wstrd mnemonics.
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm9saveiWMMXEPv)
  stcl p1, cr0, [r0], #8  @ wstrd wR0, [r0], #8
  stcl p1, cr1, [r0], #8  @ wstrd wR1, [r0], #8
  stcl p1, cr2, [r0], #8  @ wstrd wR2, [r0], #8
  stcl p1, cr3, [r0], #8  @ wstrd wR3, [r0], #8
  stcl p1, cr4, [r0], #8  @ wstrd wR4, [r0], #8
  stcl p1, cr5, [r0], #8  @ wstrd wR5, [r0], #8
  stcl p1, cr6, [r0], #8  @ wstrd wR6, [r0], #8
  stcl p1, cr7, [r0], #8  @ wstrd wR7, [r0], #8
  stcl p1, cr8, [r0], #8  @ wstrd wR8, [r0], #8
  stcl p1, cr9, [r0], #8  @ wstrd wR9, [r0], #8
  stcl p1, cr10, [r0], #8  @ wstrd wR10, [r0], #8
  stcl p1, cr11, [r0], #8  @ wstrd wR11, [r0], #8
  stcl p1, cr12, [r0], #8  @ wstrd wR12, [r0], #8
  stcl p1, cr13, [r0], #8  @ wstrd wR13, [r0], #8
  stcl p1, cr14, [r0], #8  @ wstrd wR14, [r0], #8
  stcl p1, cr15, [r0], #8  @ wstrd wR15, [r0], #8
  JMP(lr)

@
@ static void libunwind::Registers_arm::saveiWMMXControl(unw_uint32_t* values)
@
@ On entry:
@  values pointer is in r0
@
@ Saves the four iWMMX control/general registers wCGR0-wCGR3.
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveiWMMXControlEPj)
  stc2 p1, cr8, [r0], #4  @ wstrw wCGR0, [r0], #4
  stc2 p1, cr9, [r0], #4  @ wstrw wCGR1, [r0], #4
  stc2 p1, cr10, [r0], #4  @ wstrw wCGR2, [r0], #4
  stc2 p1, cr11, [r0], #4  @ wstrw wCGR3, [r0], #4
  JMP(lr)

#endif
1045
#elif defined(__or1k__)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in r3
#
# GPR rN is stored at word offset N; the link register r9 is also stored
# to the pc slot (128) and epcr (132) is zeroed.
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  l.sw   0(r3), r0
  l.sw   4(r3), r1
  l.sw   8(r3), r2
  l.sw  12(r3), r3
  l.sw  16(r3), r4
  l.sw  20(r3), r5
  l.sw  24(r3), r6
  l.sw  28(r3), r7
  l.sw  32(r3), r8
  l.sw  36(r3), r9
  l.sw  40(r3), r10
  l.sw  44(r3), r11
  l.sw  48(r3), r12
  l.sw  52(r3), r13
  l.sw  56(r3), r14
  l.sw  60(r3), r15
  l.sw  64(r3), r16
  l.sw  68(r3), r17
  l.sw  72(r3), r18
  l.sw  76(r3), r19
  l.sw  80(r3), r20
  l.sw  84(r3), r21
  l.sw  88(r3), r22
  l.sw  92(r3), r23
  l.sw  96(r3), r24
  l.sw 100(r3), r25
  l.sw 104(r3), r26
  l.sw 108(r3), r27
  l.sw 112(r3), r28
  l.sw 116(r3), r29
  l.sw 120(r3), r30
  l.sw 124(r3), r31
  # store ra to pc
  l.sw 128(r3), r9
  # zero epcr
  l.sw 132(r3), r0
1091
#elif defined(__hexagon__)
#
# extern int unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in r0
#
# GPR rN is stored at byte offset 4*N; the predicate register (c4) goes at
# 128 and the return address (r31) is stored as pc at 132.
#
# NOTE(review): OFFSET() appears unused within this section.
#define OFFSET(offset) (offset/4)
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  memw(r0+#32) = r8
  memw(r0+#36) = r9
  memw(r0+#40) = r10
  memw(r0+#44) = r11

  memw(r0+#48) = r12
  memw(r0+#52) = r13
  memw(r0+#56) = r14
  memw(r0+#60) = r15

  memw(r0+#64) = r16
  memw(r0+#68) = r17
  memw(r0+#72) = r18
  memw(r0+#76) = r19

  memw(r0+#80) = r20
  memw(r0+#84) = r21
  memw(r0+#88) = r22
  memw(r0+#92) = r23

  memw(r0+#96) = r24
  memw(r0+#100) = r25
  memw(r0+#104) = r26
  memw(r0+#108) = r27

  memw(r0+#112) = r28
  memw(r0+#116) = r29
  memw(r0+#120) = r30
  memw(r0+#124) = r31
  r1 = c4   // Predicate register
  memw(r0+#128) = r1
  r1 = memw(r30)   // *FP == Saved FP
  # NOTE(review): the load above is immediately overwritten by the next
  # move and looks vestigial; pc is stored from r31 (lr) — confirm intent.
  r1 = r31
  memw(r0+#132) = r1

  jumpr r31
1137
#elif defined(__sparc__) && defined(__arch64__)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in %o0
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  # Declare the usually-reserved globals as scratch so the assembler
  # accepts storing them.
  .register %g2, #scratch
  .register %g3, #scratch
  .register %g6, #scratch
  .register %g7, #scratch
  stx  %g1, [%o0 + 0x08]
  stx  %g2, [%o0 + 0x10]
  stx  %g3, [%o0 + 0x18]
  stx  %g4, [%o0 + 0x20]
  stx  %g5, [%o0 + 0x28]
  stx  %g6, [%o0 + 0x30]
  stx  %g7, [%o0 + 0x38]
  stx  %o0, [%o0 + 0x40]
  stx  %o1, [%o0 + 0x48]
  stx  %o2, [%o0 + 0x50]
  stx  %o3, [%o0 + 0x58]
  stx  %o4, [%o0 + 0x60]
  stx  %o5, [%o0 + 0x68]
  stx  %o6, [%o0 + 0x70]
  stx  %o7, [%o0 + 0x78]
  stx  %l0, [%o0 + 0x80]
  stx  %l1, [%o0 + 0x88]
  stx  %l2, [%o0 + 0x90]
  stx  %l3, [%o0 + 0x98]
  stx  %l4, [%o0 + 0xa0]
  stx  %l5, [%o0 + 0xa8]
  stx  %l6, [%o0 + 0xb0]
  stx  %l7, [%o0 + 0xb8]
  stx  %i0, [%o0 + 0xc0]
  stx  %i1, [%o0 + 0xc8]
  stx  %i2, [%o0 + 0xd0]
  stx  %i3, [%o0 + 0xd8]
  stx  %i4, [%o0 + 0xe0]
  stx  %i5, [%o0 + 0xe8]
  stx  %i6, [%o0 + 0xf0]
  stx  %i7, [%o0 + 0xf8]

  # save StackGhost cookie
  mov  %i7, %g4
  save %sp, -176, %sp
  # register window flush necessary even without StackGhost
  flushw
  restore
  ldx  [%sp + 2047 + 0x78], %g5  # %sp is biased by 2047 in the 64-bit ABI
  xor  %g4, %g5, %g4
  stx  %g4, [%o0 + 0x100]
  retl
  # return UNW_ESUCCESS (executes in the retl delay slot)
  clr %o0
1195
#elif defined(__sparc__)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in o0
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  ta 3                   # trap 3: flush register windows to the stack
  add %o7, 8, %o7        # adjust return address past the trap
  # std stores an even/odd register pair (8 bytes) per instruction.
  std %g0, [%o0 +   0]
  std %g2, [%o0 +   8]
  std %g4, [%o0 +  16]
  std %g6, [%o0 +  24]
  std %o0, [%o0 +  32]
  std %o2, [%o0 +  40]
  std %o4, [%o0 +  48]
  std %o6, [%o0 +  56]
  std %l0, [%o0 +  64]
  std %l2, [%o0 +  72]
  std %l4, [%o0 +  80]
  std %l6, [%o0 +  88]
  std %i0, [%o0 +  96]
  std %i2, [%o0 + 104]
  std %i4, [%o0 + 112]
  std %i6, [%o0 + 120]
  jmp %o7
   clr %o0               // return UNW_ESUCCESS (delay slot)
1225
#elif defined(__riscv)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in a0
#
# ISTORE/FSTORE and the RISCV_* size/offset constants come from assembly.h
# and select 32/64-bit forms for the current RV ABI.
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  ISTORE x1, (RISCV_ISIZE * 0)(a0) // store ra as pc
#if defined(__riscv_32e)
  // RV32E has only 16 integer registers.
  .irp i,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
#else
  .irp i,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
#endif
  ISTORE x\i, (RISCV_ISIZE * \i)(a0)
  .endr

# if defined(__riscv_flen)
  .irp i,FROM_0_TO_31
  FSTORE f\i, (RISCV_FOFFSET + RISCV_FSIZE * \i)(a0)
  .endr
# endif

  li  a0, 0  // return UNW_ESUCCESS
  ret        // jump to ra
1252
#elif defined(__s390x__)

//
// extern int __unw_getcontext(unw_context_t* thread_state)
//
// On entry:
//  thread_state pointer is in r2
//
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)

  // Save GPRs r0-r15 with a single store-multiple at offset 16.
  stmg %r0, %r15, 16(%r2)

  // Save PSWM (program status word mask, as read by epsw into r0:r1)
  epsw %r0, %r1
  stm %r0, %r1, 0(%r2)

  // Store return address as PSWA
  stg %r14, 8(%r2)

  // Save FPRs f0-f15 starting at offset 144.
  .irp i,FROM_0_TO_15
  std %f\i, (144+8*\i)(%r2)
  .endr

  // Return UNW_ESUCCESS
  lghi %r2, 0
  br %r14
1281
#elif defined(__loongarch__) && __loongarch_grlen == 64

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in $a0($r4)
#
# GPR $rN is stored at doubleword slot N, the return address ($r1/$ra) also
# goes to the pc slot (32), and FPRs follow from slot 33.
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  .irp i,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
  st.d $r\i, $a0, (8*\i)
  .endr
  st.d $r1, $a0, (8 * 32) // store $ra to pc

# if __loongarch_frlen == 64
  .irp i,FROM_0_TO_31
  fst.d $f\i, $a0, (8 * 33 + 8 * \i)
  .endr
# endif

  move  $a0, $zero  // UNW_ESUCCESS
  jr    $ra
1304
1305#endif
1306
#ifdef __arm64ec__
  // ARM64EC: publish the public unw_getcontext name by chaining aliases to
  // the "#"-mangled implementation symbol defined above.
  .globl "#unw_getcontext"
  .set "#unw_getcontext", "#__unw_getcontext"
  .weak_anti_dep unw_getcontext
  .set unw_getcontext, "#unw_getcontext"
  EXPORT_SYMBOL(unw_getcontext)
#else
  // All other targets: unw_getcontext is a weak alias of __unw_getcontext.
  WEAK_ALIAS(__unw_getcontext, unw_getcontext)
#endif
1316
1317#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */
1318
1319NO_EXEC_STACK_DIRECTIVE
1320
1321#endif /* !defined(__wasm__) */
1322