//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "assembly.h"

    .text
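
// __unw_getcontext captures the calling thread's register state into the
// unw_context_t supplied by its caller. Each supported architecture below
// provides its own implementation.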

#if !defined(__USING_SJLJ_EXCEPTIONS__)

#if defined(__i386__)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#   +                       +
#   +-----------------------+
#   + thread_state pointer  +
#   +-----------------------+
#   + return address        +
#   +-----------------------+   <-- SP
#   +                       +
#
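# The original %eax is spilled to the stack so %eax can hold the
# thread_state pointer; it is read back from the stack and stored at
# offset 0 just before returning.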
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)

  _LIBUNWIND_CET_ENDBR
  push  %eax
  movl  8(%esp), %eax
  movl  %ebx,  4(%eax)
  movl  %ecx,  8(%eax)
  movl  %edx, 12(%eax)
  movl  %edi, 16(%eax)
  movl  %esi, 20(%eax)
  movl  %ebp, 24(%eax)
  movl  %esp, %edx
  addl  $8, %edx
  movl  %edx, 28(%eax)  # store the sp at the call site as esp
  # skip ss
  # skip eflags
  movl  4(%esp), %edx
  movl  %edx, 40(%eax)  # store return address as eip
  # skip cs
  # skip ds
  # skip es
  # skip fs
  # skip gs
  movl  (%esp), %edx
  movl  %edx, (%eax)  # store original eax
  popl  %eax
  xorl  %eax, %eax    # return UNW_ESUCCESS
  ret

#elif defined(__x86_64__)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in rdi
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
#if defined(_WIN64)
#define PTR %rcx
#define TMP %rdx
#else
#define PTR %rdi
#define TMP %rsi
#endif
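# The Microsoft x64 ABI passes the first argument in %rcx, while SysV
# passes it in %rdi; TMP is a volatile scratch register in each ABI.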

  _LIBUNWIND_CET_ENDBR
  movq  %rax,   (PTR)
  movq  %rbx,  8(PTR)
  movq  %rcx, 16(PTR)
  movq  %rdx, 24(PTR)
  movq  %rdi, 32(PTR)
  movq  %rsi, 40(PTR)
  movq  %rbp, 48(PTR)
  movq  %rsp, 56(PTR)
  addq  $8,   56(PTR)
  movq  %r8,  64(PTR)
  movq  %r9,  72(PTR)
  movq  %r10, 80(PTR)
  movq  %r11, 88(PTR)
  movq  %r12, 96(PTR)
  movq  %r13,104(PTR)
  movq  %r14,112(PTR)
  movq  %r15,120(PTR)
  movq  (%rsp),TMP
  movq  TMP,128(PTR) # store return address as rip
  # skip rflags
  # skip cs
  # skip fs
  # skip gs
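# The Windows x64 ABI makes xmm6-xmm15 nonvolatile, so the context must
# capture the xmm file there; under SysV every xmm register is volatile
# and nothing needs to be saved.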

#if defined(_WIN64)
  movdqu %xmm0,176(PTR)
  movdqu %xmm1,192(PTR)
  movdqu %xmm2,208(PTR)
  movdqu %xmm3,224(PTR)
  movdqu %xmm4,240(PTR)
  movdqu %xmm5,256(PTR)
  movdqu %xmm6,272(PTR)
  movdqu %xmm7,288(PTR)
  movdqu %xmm8,304(PTR)
  movdqu %xmm9,320(PTR)
  movdqu %xmm10,336(PTR)
  movdqu %xmm11,352(PTR)
  movdqu %xmm12,368(PTR)
  movdqu %xmm13,384(PTR)
  movdqu %xmm14,400(PTR)
  movdqu %xmm15,416(PTR)
#endif
  xorl  %eax, %eax    # return UNW_ESUCCESS
  ret

#elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in a0 ($4)
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  .set push
  .set noat
  .set noreorder
  .set nomacro
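  # noat lets $1 ($at) be stored by hand; noreorder/nomacro keep the
  # assembler from reordering into the delay slot or expanding macros
  # that could clobber registers behind our back.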
  sw    $1, (4 * 1)($4)
  sw    $2, (4 * 2)($4)
  sw    $3, (4 * 3)($4)
  sw    $4, (4 * 4)($4)
  sw    $5, (4 * 5)($4)
  sw    $6, (4 * 6)($4)
  sw    $7, (4 * 7)($4)
  sw    $8, (4 * 8)($4)
  sw    $9, (4 * 9)($4)
  sw    $10, (4 * 10)($4)
  sw    $11, (4 * 11)($4)
  sw    $12, (4 * 12)($4)
  sw    $13, (4 * 13)($4)
  sw    $14, (4 * 14)($4)
  sw    $15, (4 * 15)($4)
  sw    $16, (4 * 16)($4)
  sw    $17, (4 * 17)($4)
  sw    $18, (4 * 18)($4)
  sw    $19, (4 * 19)($4)
  sw    $20, (4 * 20)($4)
  sw    $21, (4 * 21)($4)
  sw    $22, (4 * 22)($4)
  sw    $23, (4 * 23)($4)
  sw    $24, (4 * 24)($4)
  sw    $25, (4 * 25)($4)
  sw    $26, (4 * 26)($4)
  sw    $27, (4 * 27)($4)
  sw    $28, (4 * 28)($4)
  sw    $29, (4 * 29)($4)
  sw    $30, (4 * 30)($4)
  sw    $31, (4 * 31)($4)
  # Store return address to pc
  sw    $31, (4 * 32)($4)
  # hi and lo
  mfhi  $8
  sw    $8,  (4 * 33)($4)
  mflo  $8
  sw    $8,  (4 * 34)($4)
#ifdef __mips_hard_float
#if __mips_fpr != 64
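  # With 32-bit FPRs (FR=0) an even/odd pair forms one 64-bit value and
  # sdc1 may only name the even register, so each even-numbered store
  # below captures the whole pair.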
  sdc1  $f0, (4 * 36 + 8 * 0)($4)
  sdc1  $f2, (4 * 36 + 8 * 2)($4)
  sdc1  $f4, (4 * 36 + 8 * 4)($4)
  sdc1  $f6, (4 * 36 + 8 * 6)($4)
  sdc1  $f8, (4 * 36 + 8 * 8)($4)
  sdc1  $f10, (4 * 36 + 8 * 10)($4)
  sdc1  $f12, (4 * 36 + 8 * 12)($4)
  sdc1  $f14, (4 * 36 + 8 * 14)($4)
  sdc1  $f16, (4 * 36 + 8 * 16)($4)
  sdc1  $f18, (4 * 36 + 8 * 18)($4)
  sdc1  $f20, (4 * 36 + 8 * 20)($4)
  sdc1  $f22, (4 * 36 + 8 * 22)($4)
  sdc1  $f24, (4 * 36 + 8 * 24)($4)
  sdc1  $f26, (4 * 36 + 8 * 26)($4)
  sdc1  $f28, (4 * 36 + 8 * 28)($4)
  sdc1  $f30, (4 * 36 + 8 * 30)($4)
#else
  sdc1  $f0, (4 * 36 + 8 * 0)($4)
  sdc1  $f1, (4 * 36 + 8 * 1)($4)
  sdc1  $f2, (4 * 36 + 8 * 2)($4)
  sdc1  $f3, (4 * 36 + 8 * 3)($4)
  sdc1  $f4, (4 * 36 + 8 * 4)($4)
  sdc1  $f5, (4 * 36 + 8 * 5)($4)
  sdc1  $f6, (4 * 36 + 8 * 6)($4)
  sdc1  $f7, (4 * 36 + 8 * 7)($4)
  sdc1  $f8, (4 * 36 + 8 * 8)($4)
  sdc1  $f9, (4 * 36 + 8 * 9)($4)
  sdc1  $f10, (4 * 36 + 8 * 10)($4)
  sdc1  $f11, (4 * 36 + 8 * 11)($4)
  sdc1  $f12, (4 * 36 + 8 * 12)($4)
  sdc1  $f13, (4 * 36 + 8 * 13)($4)
  sdc1  $f14, (4 * 36 + 8 * 14)($4)
  sdc1  $f15, (4 * 36 + 8 * 15)($4)
  sdc1  $f16, (4 * 36 + 8 * 16)($4)
  sdc1  $f17, (4 * 36 + 8 * 17)($4)
  sdc1  $f18, (4 * 36 + 8 * 18)($4)
  sdc1  $f19, (4 * 36 + 8 * 19)($4)
  sdc1  $f20, (4 * 36 + 8 * 20)($4)
  sdc1  $f21, (4 * 36 + 8 * 21)($4)
  sdc1  $f22, (4 * 36 + 8 * 22)($4)
  sdc1  $f23, (4 * 36 + 8 * 23)($4)
  sdc1  $f24, (4 * 36 + 8 * 24)($4)
  sdc1  $f25, (4 * 36 + 8 * 25)($4)
  sdc1  $f26, (4 * 36 + 8 * 26)($4)
  sdc1  $f27, (4 * 36 + 8 * 27)($4)
  sdc1  $f28, (4 * 36 + 8 * 28)($4)
  sdc1  $f29, (4 * 36 + 8 * 29)($4)
  sdc1  $f30, (4 * 36 + 8 * 30)($4)
  sdc1  $f31, (4 * 36 + 8 * 31)($4)
#endif
#endif
  jr    $31
  # return UNW_ESUCCESS
  or    $2, $0, $0
  .set pop

#elif defined(__mips64)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in a0 ($4)
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  .set push
  .set noat
  .set noreorder
  .set nomacro
  sd    $1, (8 * 1)($4)
  sd    $2, (8 * 2)($4)
  sd    $3, (8 * 3)($4)
  sd    $4, (8 * 4)($4)
  sd    $5, (8 * 5)($4)
  sd    $6, (8 * 6)($4)
  sd    $7, (8 * 7)($4)
  sd    $8, (8 * 8)($4)
  sd    $9, (8 * 9)($4)
  sd    $10, (8 * 10)($4)
  sd    $11, (8 * 11)($4)
  sd    $12, (8 * 12)($4)
  sd    $13, (8 * 13)($4)
  sd    $14, (8 * 14)($4)
  sd    $15, (8 * 15)($4)
  sd    $16, (8 * 16)($4)
  sd    $17, (8 * 17)($4)
  sd    $18, (8 * 18)($4)
  sd    $19, (8 * 19)($4)
  sd    $20, (8 * 20)($4)
  sd    $21, (8 * 21)($4)
  sd    $22, (8 * 22)($4)
  sd    $23, (8 * 23)($4)
  sd    $24, (8 * 24)($4)
  sd    $25, (8 * 25)($4)
  sd    $26, (8 * 26)($4)
  sd    $27, (8 * 27)($4)
  sd    $28, (8 * 28)($4)
  sd    $29, (8 * 29)($4)
  sd    $30, (8 * 30)($4)
  sd    $31, (8 * 31)($4)
  # Store return address to pc
  sd    $31, (8 * 32)($4)
  # hi and lo
  mfhi  $8
  sd    $8,  (8 * 33)($4)
  mflo  $8
  sd    $8,  (8 * 34)($4)
#ifdef __mips_hard_float
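  # Under the 64-bit ABIs all 32 FPRs are independent 64-bit registers,
  # so every one of $f0-$f31 is stored.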
  sdc1  $f0, (8 * 35)($4)
  sdc1  $f1, (8 * 36)($4)
  sdc1  $f2, (8 * 37)($4)
  sdc1  $f3, (8 * 38)($4)
  sdc1  $f4, (8 * 39)($4)
  sdc1  $f5, (8 * 40)($4)
  sdc1  $f6, (8 * 41)($4)
  sdc1  $f7, (8 * 42)($4)
  sdc1  $f8, (8 * 43)($4)
  sdc1  $f9, (8 * 44)($4)
  sdc1  $f10, (8 * 45)($4)
  sdc1  $f11, (8 * 46)($4)
  sdc1  $f12, (8 * 47)($4)
  sdc1  $f13, (8 * 48)($4)
  sdc1  $f14, (8 * 49)($4)
  sdc1  $f15, (8 * 50)($4)
  sdc1  $f16, (8 * 51)($4)
  sdc1  $f17, (8 * 52)($4)
  sdc1  $f18, (8 * 53)($4)
  sdc1  $f19, (8 * 54)($4)
  sdc1  $f20, (8 * 55)($4)
  sdc1  $f21, (8 * 56)($4)
  sdc1  $f22, (8 * 57)($4)
  sdc1  $f23, (8 * 58)($4)
  sdc1  $f24, (8 * 59)($4)
  sdc1  $f25, (8 * 60)($4)
  sdc1  $f26, (8 * 61)($4)
  sdc1  $f27, (8 * 62)($4)
  sdc1  $f28, (8 * 63)($4)
  sdc1  $f29, (8 * 64)($4)
  sdc1  $f30, (8 * 65)($4)
  sdc1  $f31, (8 * 66)($4)
#endif
  jr    $31
  # return UNW_ESUCCESS
  or    $2, $0, $0
  .set pop

#elif defined(__mips__)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# Just trap for the time being.
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  teq $0, $0

#elif defined(__powerpc64__)

//
// extern int __unw_getcontext(unw_context_t* thread_state)
//
// On entry:
//  thread_state pointer is in r3
//
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)

// store register (GPR)
#define PPC64_STR(n) \
  std   n, (8 * (n + 2))(3)
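// GPR n thus lands at byte offset 8 * (n + 2); the first two doublewords
// of the context hold srr0/srr1 (see PPC64_OFFS_SRR0 in assembly.h), and
// lr is stored into the srr0 slot below.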

  // save GPRs
  PPC64_STR(0)
  mflr  0
  std   0, PPC64_OFFS_SRR0(3) // store lr as srr0
  PPC64_STR(1)
  PPC64_STR(2)
  PPC64_STR(3)
  PPC64_STR(4)
  PPC64_STR(5)
  PPC64_STR(6)
  PPC64_STR(7)
  PPC64_STR(8)
  PPC64_STR(9)
  PPC64_STR(10)
  PPC64_STR(11)
  PPC64_STR(12)
  PPC64_STR(13)
  PPC64_STR(14)
  PPC64_STR(15)
  PPC64_STR(16)
  PPC64_STR(17)
  PPC64_STR(18)
  PPC64_STR(19)
  PPC64_STR(20)
  PPC64_STR(21)
  PPC64_STR(22)
  PPC64_STR(23)
  PPC64_STR(24)
  PPC64_STR(25)
  PPC64_STR(26)
  PPC64_STR(27)
  PPC64_STR(28)
  PPC64_STR(29)
  PPC64_STR(30)
  PPC64_STR(31)

  mfcr  0
  std   0,  PPC64_OFFS_CR(3)
  mfxer 0
  std   0,  PPC64_OFFS_XER(3)
  mflr  0
  std   0,  PPC64_OFFS_LR(3)
  mfctr 0
  std   0,  PPC64_OFFS_CTR(3)
  mfvrsave    0
  std   0,  PPC64_OFFS_VRSAVE(3)

#if defined(__VSX__)
  // save VS registers
  // (note that this also saves floating point registers and V registers,
  // because part of VS is mapped to these registers)

  addi  4, 3, PPC64_OFFS_FP

// store VS register
#define PPC64_STVS(n)      \
  stxvd2x n, 0, 4         ;\
  addi    4, 4, 16
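// stxvd2x stores the full 16-byte vsN to the address in r4, which then
// advances by 16, laying the 64 VSX registers out consecutively starting
// at PPC64_OFFS_FP.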

  PPC64_STVS(0)
  PPC64_STVS(1)
  PPC64_STVS(2)
  PPC64_STVS(3)
  PPC64_STVS(4)
  PPC64_STVS(5)
  PPC64_STVS(6)
  PPC64_STVS(7)
  PPC64_STVS(8)
  PPC64_STVS(9)
  PPC64_STVS(10)
  PPC64_STVS(11)
  PPC64_STVS(12)
  PPC64_STVS(13)
  PPC64_STVS(14)
  PPC64_STVS(15)
  PPC64_STVS(16)
  PPC64_STVS(17)
  PPC64_STVS(18)
  PPC64_STVS(19)
  PPC64_STVS(20)
  PPC64_STVS(21)
  PPC64_STVS(22)
  PPC64_STVS(23)
  PPC64_STVS(24)
  PPC64_STVS(25)
  PPC64_STVS(26)
  PPC64_STVS(27)
  PPC64_STVS(28)
  PPC64_STVS(29)
  PPC64_STVS(30)
  PPC64_STVS(31)
  PPC64_STVS(32)
  PPC64_STVS(33)
  PPC64_STVS(34)
  PPC64_STVS(35)
  PPC64_STVS(36)
  PPC64_STVS(37)
  PPC64_STVS(38)
  PPC64_STVS(39)
  PPC64_STVS(40)
  PPC64_STVS(41)
  PPC64_STVS(42)
  PPC64_STVS(43)
  PPC64_STVS(44)
  PPC64_STVS(45)
  PPC64_STVS(46)
  PPC64_STVS(47)
  PPC64_STVS(48)
  PPC64_STVS(49)
  PPC64_STVS(50)
  PPC64_STVS(51)
  PPC64_STVS(52)
  PPC64_STVS(53)
  PPC64_STVS(54)
  PPC64_STVS(55)
  PPC64_STVS(56)
  PPC64_STVS(57)
  PPC64_STVS(58)
  PPC64_STVS(59)
  PPC64_STVS(60)
  PPC64_STVS(61)
  PPC64_STVS(62)
  PPC64_STVS(63)

#else

// store FP register
#define PPC64_STF(n) \
  stfd  n, (PPC64_OFFS_FP + n * 16)(3)

  // save float registers
  PPC64_STF(0)
  PPC64_STF(1)
  PPC64_STF(2)
  PPC64_STF(3)
  PPC64_STF(4)
  PPC64_STF(5)
  PPC64_STF(6)
  PPC64_STF(7)
  PPC64_STF(8)
  PPC64_STF(9)
  PPC64_STF(10)
  PPC64_STF(11)
  PPC64_STF(12)
  PPC64_STF(13)
  PPC64_STF(14)
  PPC64_STF(15)
  PPC64_STF(16)
  PPC64_STF(17)
  PPC64_STF(18)
  PPC64_STF(19)
  PPC64_STF(20)
  PPC64_STF(21)
  PPC64_STF(22)
  PPC64_STF(23)
  PPC64_STF(24)
  PPC64_STF(25)
  PPC64_STF(26)
  PPC64_STF(27)
  PPC64_STF(28)
  PPC64_STF(29)
  PPC64_STF(30)
  PPC64_STF(31)

#if defined(__ALTIVEC__)
  // save vector registers

  // Use the 16 bytes below the stack pointer as an aligned buffer in
  // which to save each vector register.
  // Note that the stack pointer is always 16-byte aligned.
  subi  4, 1, 16

#define PPC64_STV_UNALIGNED(n)             \
  stvx  n, 0, 4                           ;\
  ld    5, 0(4)                           ;\
  std   5, (PPC64_OFFS_V + n * 16)(3)     ;\
  ld    5, 8(4)                           ;\
  std   5, (PPC64_OFFS_V + n * 16 + 8)(3)
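// stvx can only store to a 16-byte-aligned address, so each vector is
// parked in the aligned buffer below the stack pointer and then copied
// doubleword by doubleword into the (arbitrarily aligned) context.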

  PPC64_STV_UNALIGNED(0)
  PPC64_STV_UNALIGNED(1)
  PPC64_STV_UNALIGNED(2)
  PPC64_STV_UNALIGNED(3)
  PPC64_STV_UNALIGNED(4)
  PPC64_STV_UNALIGNED(5)
  PPC64_STV_UNALIGNED(6)
  PPC64_STV_UNALIGNED(7)
  PPC64_STV_UNALIGNED(8)
  PPC64_STV_UNALIGNED(9)
  PPC64_STV_UNALIGNED(10)
  PPC64_STV_UNALIGNED(11)
  PPC64_STV_UNALIGNED(12)
  PPC64_STV_UNALIGNED(13)
  PPC64_STV_UNALIGNED(14)
  PPC64_STV_UNALIGNED(15)
  PPC64_STV_UNALIGNED(16)
  PPC64_STV_UNALIGNED(17)
  PPC64_STV_UNALIGNED(18)
  PPC64_STV_UNALIGNED(19)
  PPC64_STV_UNALIGNED(20)
  PPC64_STV_UNALIGNED(21)
  PPC64_STV_UNALIGNED(22)
  PPC64_STV_UNALIGNED(23)
  PPC64_STV_UNALIGNED(24)
  PPC64_STV_UNALIGNED(25)
  PPC64_STV_UNALIGNED(26)
  PPC64_STV_UNALIGNED(27)
  PPC64_STV_UNALIGNED(28)
  PPC64_STV_UNALIGNED(29)
  PPC64_STV_UNALIGNED(30)
  PPC64_STV_UNALIGNED(31)

#endif
#endif

  li    3,  0   // return UNW_ESUCCESS
  blr


#elif defined(__powerpc__)

//
// extern int __unw_getcontext(unw_context_t* thread_state)
//
// On entry:
//  thread_state pointer is in r3
//
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  stw     0,   8(3)
  mflr    0
  stw     0,   0(3) // store lr as srr0
  stw     1,  12(3)
  stw     2,  16(3)
  stw     3,  20(3)
  stw     4,  24(3)
  stw     5,  28(3)
  stw     6,  32(3)
  stw     7,  36(3)
  stw     8,  40(3)
  stw     9,  44(3)
  stw     10, 48(3)
  stw     11, 52(3)
  stw     12, 56(3)
  stw     13, 60(3)
  stw     14, 64(3)
  stw     15, 68(3)
  stw     16, 72(3)
  stw     17, 76(3)
  stw     18, 80(3)
  stw     19, 84(3)
  stw     20, 88(3)
  stw     21, 92(3)
  stw     22, 96(3)
  stw     23,100(3)
  stw     24,104(3)
  stw     25,108(3)
  stw     26,112(3)
  stw     27,116(3)
  stw     28,120(3)
  stw     29,124(3)
  stw     30,128(3)
  stw     31,132(3)

#if defined(__ALTIVEC__)
  // save VRSave register
  mfspr   0, 256
  stw     0, 156(3)
#endif
  // save CR registers
  mfcr    0
  stw     0, 136(3)
  // save CTR register
  mfctr   0
  stw     0, 148(3)

#if !defined(__NO_FPRS__)
  // save float registers
  stfd    0, 160(3)
  stfd    1, 168(3)
  stfd    2, 176(3)
  stfd    3, 184(3)
  stfd    4, 192(3)
  stfd    5, 200(3)
  stfd    6, 208(3)
  stfd    7, 216(3)
  stfd    8, 224(3)
  stfd    9, 232(3)
  stfd    10,240(3)
  stfd    11,248(3)
  stfd    12,256(3)
  stfd    13,264(3)
  stfd    14,272(3)
  stfd    15,280(3)
  stfd    16,288(3)
  stfd    17,296(3)
  stfd    18,304(3)
  stfd    19,312(3)
  stfd    20,320(3)
  stfd    21,328(3)
  stfd    22,336(3)
  stfd    23,344(3)
  stfd    24,352(3)
  stfd    25,360(3)
  stfd    26,368(3)
  stfd    27,376(3)
  stfd    28,384(3)
  stfd    29,392(3)
  stfd    30,400(3)
  stfd    31,408(3)
#endif

#if defined(__ALTIVEC__)
  // save vector registers

  subi    4, 1, 16
  rlwinm  4, 4, 0, 0, 27  // mask off the low 4 bits
  // r4 is now a 16-byte aligned pointer into the red zone

#define SAVE_VECTOR_UNALIGNED(_vec, _offset) \
  stvx    _vec, 0, 4               SEPARATOR \
  lwz     5, 0(4)                  SEPARATOR \
  stw     5, _offset(3)            SEPARATOR \
  lwz     5, 4(4)                  SEPARATOR \
  stw     5, _offset+4(3)          SEPARATOR \
  lwz     5, 8(4)                  SEPARATOR \
  stw     5, _offset+8(3)          SEPARATOR \
  lwz     5, 12(4)                 SEPARATOR \
  stw     5, _offset+12(3)
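// As on ppc64, stvx needs a 16-byte-aligned address; each vector bounces
// through the aligned red-zone buffer at r4 and is copied word by word
// into the context.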

  SAVE_VECTOR_UNALIGNED( 0, 424+0x000)
  SAVE_VECTOR_UNALIGNED( 1, 424+0x010)
  SAVE_VECTOR_UNALIGNED( 2, 424+0x020)
  SAVE_VECTOR_UNALIGNED( 3, 424+0x030)
  SAVE_VECTOR_UNALIGNED( 4, 424+0x040)
  SAVE_VECTOR_UNALIGNED( 5, 424+0x050)
  SAVE_VECTOR_UNALIGNED( 6, 424+0x060)
  SAVE_VECTOR_UNALIGNED( 7, 424+0x070)
  SAVE_VECTOR_UNALIGNED( 8, 424+0x080)
  SAVE_VECTOR_UNALIGNED( 9, 424+0x090)
  SAVE_VECTOR_UNALIGNED(10, 424+0x0A0)
  SAVE_VECTOR_UNALIGNED(11, 424+0x0B0)
  SAVE_VECTOR_UNALIGNED(12, 424+0x0C0)
  SAVE_VECTOR_UNALIGNED(13, 424+0x0D0)
  SAVE_VECTOR_UNALIGNED(14, 424+0x0E0)
  SAVE_VECTOR_UNALIGNED(15, 424+0x0F0)
  SAVE_VECTOR_UNALIGNED(16, 424+0x100)
  SAVE_VECTOR_UNALIGNED(17, 424+0x110)
  SAVE_VECTOR_UNALIGNED(18, 424+0x120)
  SAVE_VECTOR_UNALIGNED(19, 424+0x130)
  SAVE_VECTOR_UNALIGNED(20, 424+0x140)
  SAVE_VECTOR_UNALIGNED(21, 424+0x150)
  SAVE_VECTOR_UNALIGNED(22, 424+0x160)
  SAVE_VECTOR_UNALIGNED(23, 424+0x170)
  SAVE_VECTOR_UNALIGNED(24, 424+0x180)
  SAVE_VECTOR_UNALIGNED(25, 424+0x190)
  SAVE_VECTOR_UNALIGNED(26, 424+0x1A0)
  SAVE_VECTOR_UNALIGNED(27, 424+0x1B0)
  SAVE_VECTOR_UNALIGNED(28, 424+0x1C0)
  SAVE_VECTOR_UNALIGNED(29, 424+0x1D0)
  SAVE_VECTOR_UNALIGNED(30, 424+0x1E0)
  SAVE_VECTOR_UNALIGNED(31, 424+0x1F0)
#endif

  li      3, 0  // return UNW_ESUCCESS
  blr


#elif defined(__aarch64__)

//
// extern int __unw_getcontext(unw_context_t* thread_state)
//
// On entry:
//  thread_state pointer is in x0
//
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  stp    x0, x1,  [x0, #0x000]
  stp    x2, x3,  [x0, #0x010]
  stp    x4, x5,  [x0, #0x020]
  stp    x6, x7,  [x0, #0x030]
  stp    x8, x9,  [x0, #0x040]
  stp    x10,x11, [x0, #0x050]
  stp    x12,x13, [x0, #0x060]
  stp    x14,x15, [x0, #0x070]
  stp    x16,x17, [x0, #0x080]
  stp    x18,x19, [x0, #0x090]
  stp    x20,x21, [x0, #0x0A0]
  stp    x22,x23, [x0, #0x0B0]
  stp    x24,x25, [x0, #0x0C0]
  stp    x26,x27, [x0, #0x0D0]
  stp    x28,x29, [x0, #0x0E0]
  str    x30,     [x0, #0x0F0]
  mov    x1,sp
  str    x1,      [x0, #0x0F8]
  str    x30,     [x0, #0x100]    // store return address as pc
  // skip cpsr
  stp    d0, d1,  [x0, #0x110]
  stp    d2, d3,  [x0, #0x120]
  stp    d4, d5,  [x0, #0x130]
  stp    d6, d7,  [x0, #0x140]
  stp    d8, d9,  [x0, #0x150]
  stp    d10,d11, [x0, #0x160]
  stp    d12,d13, [x0, #0x170]
  stp    d14,d15, [x0, #0x180]
  stp    d16,d17, [x0, #0x190]
  stp    d18,d19, [x0, #0x1A0]
  stp    d20,d21, [x0, #0x1B0]
  stp    d22,d23, [x0, #0x1C0]
  stp    d24,d25, [x0, #0x1D0]
  stp    d26,d27, [x0, #0x1E0]
  stp    d28,d29, [x0, #0x1F0]
  str    d30,     [x0, #0x200]
  str    d31,     [x0, #0x208]
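  // Only the low 64 bits (dN) of each SIMD register are captured; AAPCS64
  // guarantees no more than that anyway (only the bottom halves of v8-v15
  // are callee-saved).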
  mov    x0, #0                   // return UNW_ESUCCESS
  ret

#elif defined(__arm__) && !defined(__APPLE__)

#if !defined(__ARM_ARCH_ISA_ARM)
#if (__ARM_ARCH_ISA_THUMB == 2)
  .syntax unified
#endif
  .thumb
#endif

@
@ extern int __unw_getcontext(unw_context_t* thread_state)
@
@ On entry:
@  thread_state pointer is in r0
@
@ Per EHABI #4.7 this only saves the core integer registers.
@ EHABI #7.4.5 notes that in general all VRS registers should be restored;
@ however, this is very hard to do for VFP registers because it is unknown
@ to the library how many registers are implemented by the architecture.
@ Instead, VFP registers are demand-saved by logic external to
@ __unw_getcontext.
@
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
#if !defined(__ARM_ARCH_ISA_ARM) && __ARM_ARCH_ISA_THUMB == 1
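  @ Thumb-1 stm and str can only address the low registers, so r8-r11, sp
  @ and lr are staged through r1-r3 (whose original values were already
  @ stored by the first stm).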
  stm r0!, {r0-r7}
  mov r1, r8
  mov r2, r9
  mov r3, r10
  stm r0!, {r1-r3}
  mov r1, r11
  mov r2, sp
  mov r3, lr
  str r1, [r0, #0]   @ r11
  @ r12 does not need storing, it is the intra-procedure-call scratch register
  str r2, [r0, #8]   @ sp
  str r3, [r0, #12]  @ lr
  str r3, [r0, #16]  @ store return address as pc
  @ T1 does not have a non-cpsr-clobbering register-zeroing instruction.
  @ It is safe to use here though because we are about to return, and cpsr is
  @ not expected to be preserved.
  movs r0, #0        @ return UNW_ESUCCESS
#else
  @ 32-bit Thumb-2 restrictions for stm:
  @ . the sp (r13) cannot be in the list
  @ . the pc (r15) cannot be in the list in an STM instruction
  stm r0, {r0-r12}
  str sp, [r0, #52]
  str lr, [r0, #56]
  str lr, [r0, #60]  @ store return address as pc
  mov r0, #0         @ return UNW_ESUCCESS
#endif
  JMP(lr)

@
@ static void libunwind::Registers_arm::saveVFPWithFSTMD(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMDEPv)
  vstmia r0, {d0-d15}
  JMP(lr)

@
@ static void libunwind::Registers_arm::saveVFPWithFSTMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMXEPv)
  vstmia r0, {d0-d15} @ fstmiax is deprecated in ARMv7+ and now behaves like vstmia
  JMP(lr)

@
@ static void libunwind::Registers_arm::saveVFPv3(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm9saveVFPv3EPv)
  @ VFP and iwMMX instructions are only available when compiling with the
  @ flags that enable them. We do not want to build the whole library that
  @ way (the compiler would then be free to emit such instructions anywhere),
  @ and this code is reached only if the personality routine needs these
  @ registers, which implies they are actually available on the target, so
  @ it is safe to execute. The instructions are therefore enabled locally,
  @ via the .fpu directive here and coprocessor mnemonics below.
  vstmia r0, {d16-d31}
  JMP(lr)

#if defined(_LIBUNWIND_ARM_WMMX)

@
@ static void libunwind::Registers_arm::saveiWMMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm9saveiWMMXEPv)
  stcl p1, cr0, [r0], #8  @ wstrd wR0, [r0], #8
  stcl p1, cr1, [r0], #8  @ wstrd wR1, [r0], #8
  stcl p1, cr2, [r0], #8  @ wstrd wR2, [r0], #8
  stcl p1, cr3, [r0], #8  @ wstrd wR3, [r0], #8
  stcl p1, cr4, [r0], #8  @ wstrd wR4, [r0], #8
  stcl p1, cr5, [r0], #8  @ wstrd wR5, [r0], #8
  stcl p1, cr6, [r0], #8  @ wstrd wR6, [r0], #8
  stcl p1, cr7, [r0], #8  @ wstrd wR7, [r0], #8
  stcl p1, cr8, [r0], #8  @ wstrd wR8, [r0], #8
  stcl p1, cr9, [r0], #8  @ wstrd wR9, [r0], #8
  stcl p1, cr10, [r0], #8  @ wstrd wR10, [r0], #8
  stcl p1, cr11, [r0], #8  @ wstrd wR11, [r0], #8
  stcl p1, cr12, [r0], #8  @ wstrd wR12, [r0], #8
  stcl p1, cr13, [r0], #8  @ wstrd wR13, [r0], #8
  stcl p1, cr14, [r0], #8  @ wstrd wR14, [r0], #8
  stcl p1, cr15, [r0], #8  @ wstrd wR15, [r0], #8
  JMP(lr)

@
@ static void libunwind::Registers_arm::saveiWMMXControl(unw_uint32_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveiWMMXControlEPj)
  stc2 p1, cr8, [r0], #4  @ wstrw wCGR0, [r0], #4
  stc2 p1, cr9, [r0], #4  @ wstrw wCGR1, [r0], #4
  stc2 p1, cr10, [r0], #4  @ wstrw wCGR2, [r0], #4
  stc2 p1, cr11, [r0], #4  @ wstrw wCGR3, [r0], #4
  JMP(lr)

#endif

#elif defined(__or1k__)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in r3
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  l.sw       0(r3), r0
  l.sw       4(r3), r1
  l.sw       8(r3), r2
  l.sw      12(r3), r3
  l.sw      16(r3), r4
  l.sw      20(r3), r5
  l.sw      24(r3), r6
  l.sw      28(r3), r7
  l.sw      32(r3), r8
  l.sw      36(r3), r9
  l.sw      40(r3), r10
  l.sw      44(r3), r11
  l.sw      48(r3), r12
  l.sw      52(r3), r13
  l.sw      56(r3), r14
  l.sw      60(r3), r15
  l.sw      64(r3), r16
  l.sw      68(r3), r17
  l.sw      72(r3), r18
  l.sw      76(r3), r19
  l.sw      80(r3), r20
  l.sw      84(r3), r21
  l.sw      88(r3), r22
  l.sw      92(r3), r23
  l.sw      96(r3), r24
  l.sw     100(r3), r25
  l.sw     104(r3), r26
  l.sw     108(r3), r27
  l.sw     112(r3), r28
  l.sw     116(r3), r29
  l.sw     120(r3), r30
  l.sw     124(r3), r31
  # store ra to pc
  l.sw     128(r3), r9
  # zero epcr
  l.sw     132(r3), r0

#elif defined(__hexagon__)
#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in r0
#
#define OFFSET(offset) (offset/4)
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
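  # On Hexagon r29/r30/r31 are sp/fp/lr. Only r8 and up are captured
  # here; presumably the lower, caller-saved registers are not needed to
  # resume unwinding.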
  memw(r0+#32) = r8
  memw(r0+#36) = r9
  memw(r0+#40) = r10
  memw(r0+#44) = r11

  memw(r0+#48) = r12
  memw(r0+#52) = r13
  memw(r0+#56) = r14
  memw(r0+#60) = r15

  memw(r0+#64) = r16
  memw(r0+#68) = r17
  memw(r0+#72) = r18
  memw(r0+#76) = r19

  memw(r0+#80) = r20
  memw(r0+#84) = r21
  memw(r0+#88) = r22
  memw(r0+#92) = r23

  memw(r0+#96) = r24
  memw(r0+#100) = r25
  memw(r0+#104) = r26
  memw(r0+#108) = r27

  memw(r0+#112) = r28
  memw(r0+#116) = r29
  memw(r0+#120) = r30
  memw(r0+#124) = r31
  r1 = c4   // Predicate register
  memw(r0+#128) = r1
  r1 = memw(r30)           // *FP == Saved FP
  r1 = r31
  memw(r0+#132) = r1

  jumpr r31

#elif defined(__sparc__) && defined(__arch64__)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in %o0
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  .register %g2, #scratch
  .register %g3, #scratch
  .register %g6, #scratch
  .register %g7, #scratch
  stx  %g1, [%o0 + 0x08]
  stx  %g2, [%o0 + 0x10]
  stx  %g3, [%o0 + 0x18]
  stx  %g4, [%o0 + 0x20]
  stx  %g5, [%o0 + 0x28]
  stx  %g6, [%o0 + 0x30]
  stx  %g7, [%o0 + 0x38]
  stx  %o0, [%o0 + 0x40]
  stx  %o1, [%o0 + 0x48]
  stx  %o2, [%o0 + 0x50]
  stx  %o3, [%o0 + 0x58]
  stx  %o4, [%o0 + 0x60]
  stx  %o5, [%o0 + 0x68]
  stx  %o6, [%o0 + 0x70]
  stx  %o7, [%o0 + 0x78]
  stx  %l0, [%o0 + 0x80]
  stx  %l1, [%o0 + 0x88]
  stx  %l2, [%o0 + 0x90]
  stx  %l3, [%o0 + 0x98]
  stx  %l4, [%o0 + 0xa0]
  stx  %l5, [%o0 + 0xa8]
  stx  %l6, [%o0 + 0xb0]
  stx  %l7, [%o0 + 0xb8]
  stx  %i0, [%o0 + 0xc0]
  stx  %i1, [%o0 + 0xc8]
  stx  %i2, [%o0 + 0xd0]
  stx  %i3, [%o0 + 0xd8]
  stx  %i4, [%o0 + 0xe0]
  stx  %i5, [%o0 + 0xe8]
  stx  %i6, [%o0 + 0xf0]
  stx  %i7, [%o0 + 0xf8]

  # save StackGhost cookie
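  # OpenBSD's StackGhost XORs return addresses with a per-process cookie
  # as register windows spill to the stack. Flushing the windows writes
  # %i7 out through that transformation; XORing the live %i7 with the
  # value read back from its save slot (%sp + BIAS + 0x78) recovers the
  # cookie.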
  mov  %i7, %g4
  save %sp, -176, %sp
  # register window flush necessary even without StackGhost
  flushw
  restore
  ldx  [%sp + 2047 + 0x78], %g5
  xor  %g4, %g5, %g4
  stx  %g4, [%o0 + 0x100]
  retl
  # return UNW_ESUCCESS
   clr %o0

#elif defined(__sparc__)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in o0
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
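  # ta 3 is the flush-windows software trap (ST_FLUSHWIN): it forces all
  # register windows out to the stack so the caller's frames can be read
  # while unwinding. %o7 holds the address of the call instruction; adding
  # 8 turns it into the return address, which is stored below (paired with
  # %o6) and lets the final plain "jmp %o7" stand in for "jmp %o7+8".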
  ta 3
  add %o7, 8, %o7
  std %g0, [%o0 +   0]
  std %g2, [%o0 +   8]
  std %g4, [%o0 +  16]
  std %g6, [%o0 +  24]
  std %o0, [%o0 +  32]
  std %o2, [%o0 +  40]
  std %o4, [%o0 +  48]
  std %o6, [%o0 +  56]
  std %l0, [%o0 +  64]
  std %l2, [%o0 +  72]
  std %l4, [%o0 +  80]
  std %l6, [%o0 +  88]
  std %i0, [%o0 +  96]
  std %i2, [%o0 + 104]
  std %i4, [%o0 + 112]
  std %i6, [%o0 + 120]
  jmp %o7
   clr %o0                   // return UNW_ESUCCESS

#elif defined(__riscv)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in a0
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
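  # ISTORE/FSTORE and the RISCV_ISIZE/RISCV_FSIZE slot sizes are macros
  # from assembly.h that select sw/sd and fsw/fsd according to
  # __riscv_xlen and __riscv_flen, so one body serves RV32 and RV64.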
  ISTORE    x1, (RISCV_ISIZE * 0)(a0) // store ra as pc
  ISTORE    x1, (RISCV_ISIZE * 1)(a0)
  ISTORE    x2, (RISCV_ISIZE * 2)(a0)
  ISTORE    x3, (RISCV_ISIZE * 3)(a0)
  ISTORE    x4, (RISCV_ISIZE * 4)(a0)
  ISTORE    x5, (RISCV_ISIZE * 5)(a0)
  ISTORE    x6, (RISCV_ISIZE * 6)(a0)
  ISTORE    x7, (RISCV_ISIZE * 7)(a0)
  ISTORE    x8, (RISCV_ISIZE * 8)(a0)
  ISTORE    x9, (RISCV_ISIZE * 9)(a0)
  ISTORE    x10, (RISCV_ISIZE * 10)(a0)
  ISTORE    x11, (RISCV_ISIZE * 11)(a0)
  ISTORE    x12, (RISCV_ISIZE * 12)(a0)
  ISTORE    x13, (RISCV_ISIZE * 13)(a0)
  ISTORE    x14, (RISCV_ISIZE * 14)(a0)
  ISTORE    x15, (RISCV_ISIZE * 15)(a0)
  ISTORE    x16, (RISCV_ISIZE * 16)(a0)
  ISTORE    x17, (RISCV_ISIZE * 17)(a0)
  ISTORE    x18, (RISCV_ISIZE * 18)(a0)
  ISTORE    x19, (RISCV_ISIZE * 19)(a0)
  ISTORE    x20, (RISCV_ISIZE * 20)(a0)
  ISTORE    x21, (RISCV_ISIZE * 21)(a0)
  ISTORE    x22, (RISCV_ISIZE * 22)(a0)
  ISTORE    x23, (RISCV_ISIZE * 23)(a0)
  ISTORE    x24, (RISCV_ISIZE * 24)(a0)
  ISTORE    x25, (RISCV_ISIZE * 25)(a0)
  ISTORE    x26, (RISCV_ISIZE * 26)(a0)
  ISTORE    x27, (RISCV_ISIZE * 27)(a0)
  ISTORE    x28, (RISCV_ISIZE * 28)(a0)
  ISTORE    x29, (RISCV_ISIZE * 29)(a0)
  ISTORE    x30, (RISCV_ISIZE * 30)(a0)
  ISTORE    x31, (RISCV_ISIZE * 31)(a0)

# if defined(__riscv_flen)
  FSTORE    f0, (RISCV_FOFFSET + RISCV_FSIZE * 0)(a0)
  FSTORE    f1, (RISCV_FOFFSET + RISCV_FSIZE * 1)(a0)
  FSTORE    f2, (RISCV_FOFFSET + RISCV_FSIZE * 2)(a0)
  FSTORE    f3, (RISCV_FOFFSET + RISCV_FSIZE * 3)(a0)
  FSTORE    f4, (RISCV_FOFFSET + RISCV_FSIZE * 4)(a0)
  FSTORE    f5, (RISCV_FOFFSET + RISCV_FSIZE * 5)(a0)
  FSTORE    f6, (RISCV_FOFFSET + RISCV_FSIZE * 6)(a0)
  FSTORE    f7, (RISCV_FOFFSET + RISCV_FSIZE * 7)(a0)
  FSTORE    f8, (RISCV_FOFFSET + RISCV_FSIZE * 8)(a0)
  FSTORE    f9, (RISCV_FOFFSET + RISCV_FSIZE * 9)(a0)
  FSTORE    f10, (RISCV_FOFFSET + RISCV_FSIZE * 10)(a0)
  FSTORE    f11, (RISCV_FOFFSET + RISCV_FSIZE * 11)(a0)
  FSTORE    f12, (RISCV_FOFFSET + RISCV_FSIZE * 12)(a0)
  FSTORE    f13, (RISCV_FOFFSET + RISCV_FSIZE * 13)(a0)
  FSTORE    f14, (RISCV_FOFFSET + RISCV_FSIZE * 14)(a0)
  FSTORE    f15, (RISCV_FOFFSET + RISCV_FSIZE * 15)(a0)
  FSTORE    f16, (RISCV_FOFFSET + RISCV_FSIZE * 16)(a0)
  FSTORE    f17, (RISCV_FOFFSET + RISCV_FSIZE * 17)(a0)
  FSTORE    f18, (RISCV_FOFFSET + RISCV_FSIZE * 18)(a0)
  FSTORE    f19, (RISCV_FOFFSET + RISCV_FSIZE * 19)(a0)
  FSTORE    f20, (RISCV_FOFFSET + RISCV_FSIZE * 20)(a0)
  FSTORE    f21, (RISCV_FOFFSET + RISCV_FSIZE * 21)(a0)
  FSTORE    f22, (RISCV_FOFFSET + RISCV_FSIZE * 22)(a0)
  FSTORE    f23, (RISCV_FOFFSET + RISCV_FSIZE * 23)(a0)
  FSTORE    f24, (RISCV_FOFFSET + RISCV_FSIZE * 24)(a0)
  FSTORE    f25, (RISCV_FOFFSET + RISCV_FSIZE * 25)(a0)
  FSTORE    f26, (RISCV_FOFFSET + RISCV_FSIZE * 26)(a0)
  FSTORE    f27, (RISCV_FOFFSET + RISCV_FSIZE * 27)(a0)
  FSTORE    f28, (RISCV_FOFFSET + RISCV_FSIZE * 28)(a0)
  FSTORE    f29, (RISCV_FOFFSET + RISCV_FSIZE * 29)(a0)
  FSTORE    f30, (RISCV_FOFFSET + RISCV_FSIZE * 30)(a0)
  FSTORE    f31, (RISCV_FOFFSET + RISCV_FSIZE * 31)(a0)
# endif

  li     a0, 0  // return UNW_ESUCCESS
  ret           // jump to ra
#endif

  WEAK_ALIAS(__unw_getcontext, unw_getcontext)

#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */

NO_EXEC_STACK_DIRECTIVE