//===-------------------- UnwindRegistersRestore.S ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "assembly.h"

  .text

#if !defined(__USING_SJLJ_EXCEPTIONS__)

#if defined(__i386__)
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_x866jumptoEv)
#
# void libunwind::Registers_x86::jumpto()
#
#if defined(_WIN32)
# On Windows, the 'this' pointer is passed in ecx instead of on the stack
  movl   %ecx, %eax
#else
# On entry:
#  +                       +
#  +-----------------------+
#  + thread_state pointer  +
#  +-----------------------+
#  + return address        +
#  +-----------------------+   <-- SP
#  +                       +
  movl   4(%esp), %eax
#endif
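  # The offsets used below imply the following (assumed) layout of the saved
  # Registers_x86 context, in 4-byte slots (see Registers.hpp for the
  # authoritative definition):
  #   0:eax  4:ebx  8:ecx  12:edx  16:edi  20:esi  24:ebp  28:esp  40:eip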
  # set up eax and ret on new stack location
  movl  28(%eax), %edx # edx holds new stack pointer
  subl  $8,%edx
  movl  %edx, 28(%eax)
  movl  0(%eax), %ebx
  movl  %ebx, 0(%edx)
  movl  40(%eax), %ebx
  movl  %ebx, 4(%edx)
  # we now have ret and eax pushed onto where new stack will be
  # restore all registers
  movl   4(%eax), %ebx
  movl   8(%eax), %ecx
  movl  12(%eax), %edx
  movl  16(%eax), %edi
  movl  20(%eax), %esi
  movl  24(%eax), %ebp
  movl  28(%eax), %esp
  # skip ss
  # skip eflags
  pop    %eax  # eax was already pushed on new stack
  ret        # eip was already pushed on new stack
  # skip cs
  # skip ds
  # skip es
  # skip fs
  # skip gs

#elif defined(__x86_64__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind16Registers_x86_646jumptoEv)
#
# void libunwind::Registers_x86_64::jumpto()
#
#if defined(_WIN64)
# On entry, thread_state pointer is in rcx; move it into rdi
# to share restore code below. Since this routine restores and
# overwrites all registers, we can use the same registers for
# pointers and temporaries as on unix even though win64 normally
# mustn't clobber some of them.
  movq  %rcx, %rdi
#else
# On entry, thread_state pointer is in rdi
#endif
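
# The offsets used below imply the following (assumed) layout of the saved
# Registers_x86_64 context, in 8-byte slots (see Registers.hpp for the
# authoritative definition):
#   0:rax   8:rbx  16:rcx  24:rdx  32:rdi  40:rsi  48:rbp  56:rsp
#   64:r8  72:r9  80:r10  88:r11  96:r12  104:r13  112:r14  120:r15  128:rip
# and, for _WIN64 only, xmm0-xmm15 in 16-byte slots starting at offset 176.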

  movq  56(%rdi), %rax # rax holds new stack pointer
  subq  $16, %rax
  movq  %rax, 56(%rdi)
  movq  32(%rdi), %rbx  # store new rdi on new stack
  movq  %rbx, 0(%rax)
  movq  128(%rdi), %rbx # store new rip on new stack
  movq  %rbx, 8(%rax)
  # restore all registers
  movq    0(%rdi), %rax
  movq    8(%rdi), %rbx
  movq   16(%rdi), %rcx
  movq   24(%rdi), %rdx
  # restore rdi later
  movq   40(%rdi), %rsi
  movq   48(%rdi), %rbp
  # restore rsp later
  movq   64(%rdi), %r8
  movq   72(%rdi), %r9
  movq   80(%rdi), %r10
  movq   88(%rdi), %r11
  movq   96(%rdi), %r12
  movq  104(%rdi), %r13
  movq  112(%rdi), %r14
  movq  120(%rdi), %r15
  # skip rflags
  # skip cs
  # skip fs
  # skip gs

#if defined(_WIN64)
  movdqu 176(%rdi),%xmm0
  movdqu 192(%rdi),%xmm1
  movdqu 208(%rdi),%xmm2
  movdqu 224(%rdi),%xmm3
  movdqu 240(%rdi),%xmm4
  movdqu 256(%rdi),%xmm5
  movdqu 272(%rdi),%xmm6
  movdqu 288(%rdi),%xmm7
  movdqu 304(%rdi),%xmm8
  movdqu 320(%rdi),%xmm9
  movdqu 336(%rdi),%xmm10
  movdqu 352(%rdi),%xmm11
  movdqu 368(%rdi),%xmm12
  movdqu 384(%rdi),%xmm13
  movdqu 400(%rdi),%xmm14
  movdqu 416(%rdi),%xmm15
#endif
  movq  56(%rdi), %rsp  # cut back rsp to new location
  pop    %rdi      # rdi was saved here earlier
  ret            # rip was saved here


#elif defined(__powerpc64__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_ppc646jumptoEv)
//
// void libunwind::Registers_ppc64::jumpto()
//
// On entry:
//  thread_state pointer is in r3
//

// load register (GPR)
#define PPC64_LR(n) \
  ld    %r##n, (8 * (n + 2))(%r3)

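// For example, PPC64_LR(6) expands to "ld %r6, 64(%r3)", i.e. GPR n is
// assumed to be saved at offset 8 * (n + 2) in the context pointed to by r3.
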
  // restore integral registers
  // skip r0 for now
  // skip r1 for now
  PPC64_LR(2)
  // skip r3 for now
  // skip r4 for now
  // skip r5 for now
  PPC64_LR(6)
  PPC64_LR(7)
  PPC64_LR(8)
  PPC64_LR(9)
  PPC64_LR(10)
  PPC64_LR(11)
  PPC64_LR(12)
  PPC64_LR(13)
  PPC64_LR(14)
  PPC64_LR(15)
  PPC64_LR(16)
  PPC64_LR(17)
  PPC64_LR(18)
  PPC64_LR(19)
  PPC64_LR(20)
  PPC64_LR(21)
  PPC64_LR(22)
  PPC64_LR(23)
  PPC64_LR(24)
  PPC64_LR(25)
  PPC64_LR(26)
  PPC64_LR(27)
  PPC64_LR(28)
  PPC64_LR(29)
  PPC64_LR(30)
  PPC64_LR(31)

#ifdef PPC64_HAS_VMX

  // restore VS registers
  // (note that this also restores floating point registers and V registers,
  // because part of VS is mapped to these registers)

  addi  %r4, %r3, PPC64_OFFS_FP

// load VS register
#define PPC64_LVS(n)         \
  lxvd2x  %vs##n, 0, %r4    ;\
  addi    %r4, %r4, 16
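
// Each PPC64_LVS(n) loads one 16-byte VS register and advances %r4 by 16, so
// vs0-vs31 are assumed to occupy consecutive 16-byte slots starting at
// PPC64_OFFS_FP. For example, PPC64_LVS(1) expands to:
//   lxvd2x  %vs1, 0, %r4
//   addi    %r4, %r4, 16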

  // restore the first 32 VS regs (and also all floating point regs)
  PPC64_LVS(0)
  PPC64_LVS(1)
  PPC64_LVS(2)
  PPC64_LVS(3)
  PPC64_LVS(4)
  PPC64_LVS(5)
  PPC64_LVS(6)
  PPC64_LVS(7)
  PPC64_LVS(8)
  PPC64_LVS(9)
  PPC64_LVS(10)
  PPC64_LVS(11)
  PPC64_LVS(12)
  PPC64_LVS(13)
  PPC64_LVS(14)
  PPC64_LVS(15)
  PPC64_LVS(16)
  PPC64_LVS(17)
  PPC64_LVS(18)
  PPC64_LVS(19)
  PPC64_LVS(20)
  PPC64_LVS(21)
  PPC64_LVS(22)
  PPC64_LVS(23)
  PPC64_LVS(24)
  PPC64_LVS(25)
  PPC64_LVS(26)
  PPC64_LVS(27)
  PPC64_LVS(28)
  PPC64_LVS(29)
  PPC64_LVS(30)
  PPC64_LVS(31)

  // use VRSAVE to conditionally restore the remaining VS regs, which are
  // the ones the V (vector) registers are mapped onto

  ld    %r5, PPC64_OFFS_VRSAVE(%r3)   // test VRsave
  cmpwi %r5, 0
  beq   Lnovec

// conditionally load VS
#define PPC64_CLVS_BOTTOM(n)               \
  beq    Ldone##n                         ;\
  addi   %r4, %r3, PPC64_OFFS_FP + n * 16 ;\
  lxvd2x %vs##n, 0, %r4                   ;\
Ldone##n:

#define PPC64_CLVSl(n)           \
  andis. %r0, %r5, (1<<(47-n))  ;\
PPC64_CLVS_BOTTOM(n)

#define PPC64_CLVSh(n)           \
  andi.  %r0, %r5, (1<<(63-n))  ;\
PPC64_CLVS_BOTTOM(n)
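
// The l/h variants differ only in which half of the 32-bit VRSAVE value in
// %r5 they test: andis. checks the upper 16 bits, andi. the lower 16. For
// example, PPC64_CLVSl(32) expands to roughly:
//   andis. %r0, %r5, (1<<(47-32))            // VRSAVE bit for v0 (vs32)
//   beq    Ldone32
//   addi   %r4, %r3, PPC64_OFFS_FP + 32 * 16
//   lxvd2x %vs32, 0, %r4
// Ldone32: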

  PPC64_CLVSl(32)
  PPC64_CLVSl(33)
  PPC64_CLVSl(34)
  PPC64_CLVSl(35)
  PPC64_CLVSl(36)
  PPC64_CLVSl(37)
  PPC64_CLVSl(38)
  PPC64_CLVSl(39)
  PPC64_CLVSl(40)
  PPC64_CLVSl(41)
  PPC64_CLVSl(42)
  PPC64_CLVSl(43)
  PPC64_CLVSl(44)
  PPC64_CLVSl(45)
  PPC64_CLVSl(46)
  PPC64_CLVSl(47)
  PPC64_CLVSh(48)
  PPC64_CLVSh(49)
  PPC64_CLVSh(50)
  PPC64_CLVSh(51)
  PPC64_CLVSh(52)
  PPC64_CLVSh(53)
  PPC64_CLVSh(54)
  PPC64_CLVSh(55)
  PPC64_CLVSh(56)
  PPC64_CLVSh(57)
  PPC64_CLVSh(58)
  PPC64_CLVSh(59)
  PPC64_CLVSh(60)
  PPC64_CLVSh(61)
  PPC64_CLVSh(62)
  PPC64_CLVSh(63)

#else

// load FP register
#define PPC64_LF(n) \
  lfd   %f##n, (PPC64_OFFS_FP + n * 16)(%r3)
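
// For example, PPC64_LF(2) expands to "lfd %f2, (PPC64_OFFS_FP + 32)(%r3)";
// each FP register is assumed to occupy the first 8 bytes of a 16-byte slot
// in the save area that the VSX path above treats as vs0-vs31.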

  // restore float registers
  PPC64_LF(0)
  PPC64_LF(1)
  PPC64_LF(2)
  PPC64_LF(3)
  PPC64_LF(4)
  PPC64_LF(5)
  PPC64_LF(6)
  PPC64_LF(7)
  PPC64_LF(8)
  PPC64_LF(9)
  PPC64_LF(10)
  PPC64_LF(11)
  PPC64_LF(12)
  PPC64_LF(13)
  PPC64_LF(14)
  PPC64_LF(15)
  PPC64_LF(16)
  PPC64_LF(17)
  PPC64_LF(18)
  PPC64_LF(19)
  PPC64_LF(20)
  PPC64_LF(21)
  PPC64_LF(22)
  PPC64_LF(23)
  PPC64_LF(24)
  PPC64_LF(25)
  PPC64_LF(26)
  PPC64_LF(27)
  PPC64_LF(28)
  PPC64_LF(29)
  PPC64_LF(30)
  PPC64_LF(31)

  // restore vector registers if any are in use
  ld    %r5, PPC64_OFFS_VRSAVE(%r3)   // test VRsave
  cmpwi %r5, 0
  beq   Lnovec

  subi  %r4, %r1, 16
  // r4 is now a 16-byte aligned pointer into the red zone
  // the _vectorScalarRegisters may not be 16-byte aligned
  // so copy via red zone temp buffer

#define PPC64_CLV_UNALIGNED_BOTTOM(n)            \
  beq    Ldone##n                               ;\
  ld     %r0, (PPC64_OFFS_V + n * 16)(%r3)      ;\
  std    %r0, 0(%r4)                            ;\
  ld     %r0, (PPC64_OFFS_V + n * 16 + 8)(%r3)  ;\
  std    %r0, 8(%r4)                            ;\
  lvx    %v##n, 0, %r4                          ;\
Ldone  ## n:

#define PPC64_CLV_UNALIGNEDl(n)  \
  andis. %r0, %r5, (1<<(15-n))  ;\
PPC64_CLV_UNALIGNED_BOTTOM(n)

#define PPC64_CLV_UNALIGNEDh(n)  \
  andi.  %r0, %r5, (1<<(31-n))  ;\
PPC64_CLV_UNALIGNED_BOTTOM(n)
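
// As in the VSX path, the l/h variants test the upper/lower half of VRSAVE
// (e.g. PPC64_CLV_UNALIGNEDl(0) tests the bit for v0 with
// "andis. %r0, %r5, (1<<15)"). Each enabled register is copied 8 bytes at a
// time from its possibly-unaligned slot at PPC64_OFFS_V + n * 16 into the
// aligned red-zone buffer at %r4, then loaded with lvx.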

  PPC64_CLV_UNALIGNEDl(0)
  PPC64_CLV_UNALIGNEDl(1)
  PPC64_CLV_UNALIGNEDl(2)
  PPC64_CLV_UNALIGNEDl(3)
  PPC64_CLV_UNALIGNEDl(4)
  PPC64_CLV_UNALIGNEDl(5)
  PPC64_CLV_UNALIGNEDl(6)
  PPC64_CLV_UNALIGNEDl(7)
  PPC64_CLV_UNALIGNEDl(8)
  PPC64_CLV_UNALIGNEDl(9)
  PPC64_CLV_UNALIGNEDl(10)
  PPC64_CLV_UNALIGNEDl(11)
  PPC64_CLV_UNALIGNEDl(12)
  PPC64_CLV_UNALIGNEDl(13)
  PPC64_CLV_UNALIGNEDl(14)
  PPC64_CLV_UNALIGNEDl(15)
  PPC64_CLV_UNALIGNEDh(16)
  PPC64_CLV_UNALIGNEDh(17)
  PPC64_CLV_UNALIGNEDh(18)
  PPC64_CLV_UNALIGNEDh(19)
  PPC64_CLV_UNALIGNEDh(20)
  PPC64_CLV_UNALIGNEDh(21)
  PPC64_CLV_UNALIGNEDh(22)
  PPC64_CLV_UNALIGNEDh(23)
  PPC64_CLV_UNALIGNEDh(24)
  PPC64_CLV_UNALIGNEDh(25)
  PPC64_CLV_UNALIGNEDh(26)
  PPC64_CLV_UNALIGNEDh(27)
  PPC64_CLV_UNALIGNEDh(28)
  PPC64_CLV_UNALIGNEDh(29)
  PPC64_CLV_UNALIGNEDh(30)
  PPC64_CLV_UNALIGNEDh(31)

#endif

Lnovec:
  ld    %r0, PPC64_OFFS_CR(%r3)
  mtcr  %r0
  ld    %r0, PPC64_OFFS_SRR0(%r3)
  mtctr %r0

  PPC64_LR(0)
  PPC64_LR(5)
  PPC64_LR(4)
  PPC64_LR(1)
  PPC64_LR(3)
  bctr

#elif defined(__ppc__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_ppc6jumptoEv)
//
// void libunwind::Registers_ppc::jumpto()
//
// On entry:
//  thread_state pointer is in r3
//

  // restore integral registers
  // skip r0 for now
  // skip r1 for now
  lwz     %r2,  16(%r3)
  // skip r3 for now
  // skip r4 for now
  // skip r5 for now
  lwz     %r6,  32(%r3)
  lwz     %r7,  36(%r3)
  lwz     %r8,  40(%r3)
  lwz     %r9,  44(%r3)
  lwz     %r10, 48(%r3)
  lwz     %r11, 52(%r3)
  lwz     %r12, 56(%r3)
  lwz     %r13, 60(%r3)
  lwz     %r14, 64(%r3)
  lwz     %r15, 68(%r3)
  lwz     %r16, 72(%r3)
  lwz     %r17, 76(%r3)
  lwz     %r18, 80(%r3)
  lwz     %r19, 84(%r3)
  lwz     %r20, 88(%r3)
  lwz     %r21, 92(%r3)
  lwz     %r22, 96(%r3)
  lwz     %r23,100(%r3)
  lwz     %r24,104(%r3)
  lwz     %r25,108(%r3)
  lwz     %r26,112(%r3)
  lwz     %r27,116(%r3)
  lwz     %r28,120(%r3)
  lwz     %r29,124(%r3)
  lwz     %r30,128(%r3)
  lwz     %r31,132(%r3)

  // restore float registers
  lfd     %f0, 160(%r3)
  lfd     %f1, 168(%r3)
  lfd     %f2, 176(%r3)
  lfd     %f3, 184(%r3)
  lfd     %f4, 192(%r3)
  lfd     %f5, 200(%r3)
  lfd     %f6, 208(%r3)
  lfd     %f7, 216(%r3)
  lfd     %f8, 224(%r3)
  lfd     %f9, 232(%r3)
  lfd     %f10,240(%r3)
  lfd     %f11,248(%r3)
  lfd     %f12,256(%r3)
  lfd     %f13,264(%r3)
  lfd     %f14,272(%r3)
  lfd     %f15,280(%r3)
  lfd     %f16,288(%r3)
  lfd     %f17,296(%r3)
  lfd     %f18,304(%r3)
  lfd     %f19,312(%r3)
  lfd     %f20,320(%r3)
  lfd     %f21,328(%r3)
  lfd     %f22,336(%r3)
  lfd     %f23,344(%r3)
  lfd     %f24,352(%r3)
  lfd     %f25,360(%r3)
  lfd     %f26,368(%r3)
  lfd     %f27,376(%r3)
  lfd     %f28,384(%r3)
  lfd     %f29,392(%r3)
  lfd     %f30,400(%r3)
  lfd     %f31,408(%r3)

  // restore vector registers if any are in use
  lwz     %r5, 156(%r3)       // test VRsave
  cmpwi   %r5, 0
  beq     Lnovec

  subi    %r4, %r1, 16
  rlwinm  %r4, %r4, 0, 0, 27  // clear low 4 bits (16-byte align)
  // r4 is now a 16-byte aligned pointer into the red zone
  // the _vectorRegisters may not be 16-byte aligned so copy via red zone temp buffer


#define LOAD_VECTOR_UNALIGNEDl(_index) \
  andis.  %r0, %r5, (1<<(15-_index))  SEPARATOR \
  beq     Ldone ## _index             SEPARATOR \
  lwz     %r0, 424+_index*16(%r3)     SEPARATOR \
  stw     %r0, 0(%r4)                 SEPARATOR \
  lwz     %r0, 424+_index*16+4(%r3)   SEPARATOR \
  stw     %r0, 4(%r4)                 SEPARATOR \
  lwz     %r0, 424+_index*16+8(%r3)   SEPARATOR \
  stw     %r0, 8(%r4)                 SEPARATOR \
  lwz     %r0, 424+_index*16+12(%r3)  SEPARATOR \
  stw     %r0, 12(%r4)                SEPARATOR \
  lvx     %v ## _index, 0, %r4        SEPARATOR \
  Ldone ## _index:

#define LOAD_VECTOR_UNALIGNEDh(_index) \
  andi.   %r0, %r5, (1<<(31-_index))  SEPARATOR \
  beq     Ldone ## _index             SEPARATOR \
  lwz     %r0, 424+_index*16(%r3)     SEPARATOR \
  stw     %r0, 0(%r4)                 SEPARATOR \
  lwz     %r0, 424+_index*16+4(%r3)   SEPARATOR \
  stw     %r0, 4(%r4)                 SEPARATOR \
  lwz     %r0, 424+_index*16+8(%r3)   SEPARATOR \
  stw     %r0, 8(%r4)                 SEPARATOR \
  lwz     %r0, 424+_index*16+12(%r3)  SEPARATOR \
  stw     %r0, 12(%r4)                SEPARATOR \
  lvx     %v ## _index, 0, %r4        SEPARATOR \
  Ldone ## _index:

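// Both macros copy one 16-byte vector register, a word at a time, from its
// (possibly unaligned) slot at offset 424 + _index * 16 in the context into
// the aligned red-zone buffer at %r4, then load it with lvx. They differ only
// in whether they test the upper (andis.) or lower (andi.) half of the VRSAVE
// value in %r5.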

  LOAD_VECTOR_UNALIGNEDl(0)
  LOAD_VECTOR_UNALIGNEDl(1)
  LOAD_VECTOR_UNALIGNEDl(2)
  LOAD_VECTOR_UNALIGNEDl(3)
  LOAD_VECTOR_UNALIGNEDl(4)
  LOAD_VECTOR_UNALIGNEDl(5)
  LOAD_VECTOR_UNALIGNEDl(6)
  LOAD_VECTOR_UNALIGNEDl(7)
  LOAD_VECTOR_UNALIGNEDl(8)
  LOAD_VECTOR_UNALIGNEDl(9)
  LOAD_VECTOR_UNALIGNEDl(10)
  LOAD_VECTOR_UNALIGNEDl(11)
  LOAD_VECTOR_UNALIGNEDl(12)
  LOAD_VECTOR_UNALIGNEDl(13)
  LOAD_VECTOR_UNALIGNEDl(14)
  LOAD_VECTOR_UNALIGNEDl(15)
  LOAD_VECTOR_UNALIGNEDh(16)
  LOAD_VECTOR_UNALIGNEDh(17)
  LOAD_VECTOR_UNALIGNEDh(18)
  LOAD_VECTOR_UNALIGNEDh(19)
  LOAD_VECTOR_UNALIGNEDh(20)
  LOAD_VECTOR_UNALIGNEDh(21)
  LOAD_VECTOR_UNALIGNEDh(22)
  LOAD_VECTOR_UNALIGNEDh(23)
  LOAD_VECTOR_UNALIGNEDh(24)
  LOAD_VECTOR_UNALIGNEDh(25)
  LOAD_VECTOR_UNALIGNEDh(26)
  LOAD_VECTOR_UNALIGNEDh(27)
  LOAD_VECTOR_UNALIGNEDh(28)
  LOAD_VECTOR_UNALIGNEDh(29)
  LOAD_VECTOR_UNALIGNEDh(30)
  LOAD_VECTOR_UNALIGNEDh(31)

Lnovec:
  lwz     %r0, 136(%r3)   // __cr
  mtcr    %r0
  lwz     %r0, 148(%r3)   // __ctr
  mtctr   %r0
  lwz     %r0,   0(%r3)   // __srr0
  mtctr   %r0
  lwz     %r0,   8(%r3)   // do r0 now
  lwz     %r5,  28(%r3)   // do r5 now
  lwz     %r4,  24(%r3)   // do r4 now
  lwz     %r1,  12(%r3)   // do sp now
  lwz     %r3,  20(%r3)   // do r3 last
  bctr

#elif defined(__aarch64__)

//
// void libunwind::Registers_arm64::jumpto()
//
// On entry:
//  thread_state pointer is in x0
//
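// The offsets used below imply the following (assumed) layout of the saved
// Registers_arm64 context (see Registers.hpp for the authoritative
// definition): x0-x29 in 8-byte slots starting at 0x000, sp at 0x0F8, pc at
// 0x100, and d0-d31 in 8-byte slots starting at 0x110. The saved pc is loaded
// into lr (x30), so the final "ret x30" transfers control to it.
//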
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_arm646jumptoEv)
  // skip restore of x0,x1 for now
  ldp    x2, x3,  [x0, #0x010]
  ldp    x4, x5,  [x0, #0x020]
  ldp    x6, x7,  [x0, #0x030]
  ldp    x8, x9,  [x0, #0x040]
  ldp    x10,x11, [x0, #0x050]
  ldp    x12,x13, [x0, #0x060]
  ldp    x14,x15, [x0, #0x070]
  // x16 and x17 were clobbered by the call into the unwinder, so no point in
  // restoring them.
  ldp    x18,x19, [x0, #0x090]
  ldp    x20,x21, [x0, #0x0A0]
  ldp    x22,x23, [x0, #0x0B0]
  ldp    x24,x25, [x0, #0x0C0]
  ldp    x26,x27, [x0, #0x0D0]
  ldp    x28,x29, [x0, #0x0E0]
  ldr    x30,     [x0, #0x100]  // restore pc into lr

  ldp    d0, d1,  [x0, #0x110]
  ldp    d2, d3,  [x0, #0x120]
  ldp    d4, d5,  [x0, #0x130]
  ldp    d6, d7,  [x0, #0x140]
  ldp    d8, d9,  [x0, #0x150]
  ldp    d10,d11, [x0, #0x160]
  ldp    d12,d13, [x0, #0x170]
  ldp    d14,d15, [x0, #0x180]
  ldp    d16,d17, [x0, #0x190]
  ldp    d18,d19, [x0, #0x1A0]
  ldp    d20,d21, [x0, #0x1B0]
  ldp    d22,d23, [x0, #0x1C0]
  ldp    d24,d25, [x0, #0x1D0]
  ldp    d26,d27, [x0, #0x1E0]
  ldp    d28,d29, [x0, #0x1F0]
  ldr    d30,     [x0, #0x200]
  ldr    d31,     [x0, #0x208]

  // Finally, restore sp. This must be done after the last read from the
  // context struct, because it is allocated on the stack, and an exception
  // could clobber the de-allocated portion of the stack after sp has been
  // restored.
  ldr    x16,     [x0, #0x0F8]
  ldp    x0, x1,  [x0, #0x000]  // restore x0,x1
  mov    sp,x16                 // restore sp
  ret    x30                    // jump to pc

#elif defined(__arm__) && !defined(__APPLE__)

#if !defined(__ARM_ARCH_ISA_ARM)
#if (__ARM_ARCH_ISA_THUMB == 2)
  .syntax unified
#endif
  .thumb
#endif

@
@ void libunwind::Registers_arm::restoreCoreAndJumpTo()
@
@ On entry:
@  thread_state pointer is in r0
@
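@ The loads below assume r0-r12 are saved in 4-byte slots starting at offset
@ 0, with sp at offset 52 (0x34) and the resume pc at offset 60 (0x3c); see
@ Registers_arm in Registers.hpp for the authoritative layout.
@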
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm20restoreCoreAndJumpToEv)
#if !defined(__ARM_ARCH_ISA_ARM) && __ARM_ARCH_ISA_THUMB == 1
  @ r8-r11: ldm into r1-r4, then mov to r8-r11
  adds r0, #0x20
  ldm r0!, {r1-r4}
  subs r0, #0x30
  mov r8, r1
  mov r9, r2
  mov r10, r3
  mov r11, r4
  @ r12 does not need loading, it is the intra-procedure-call scratch register
  ldr r2, [r0, #0x34]
  ldr r3, [r0, #0x3c]
  mov sp, r2
  mov lr, r3         @ restore pc into lr
  ldm r0, {r0-r7}
#else
  @ Use lr as base so that r0 can be restored.
  mov lr, r0
  @ 32bit thumb-2 restrictions for ldm:
  @ . the sp (r13) cannot be in the list
  @ . the pc (r15) and lr (r14) cannot both be in the list in an LDM instruction
  ldm lr, {r0-r12}
  ldr sp, [lr, #52]
  ldr lr, [lr, #60]  @ restore pc into lr
#endif
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMD(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMDEPv)
  @ VFP and iwMMX instructions are only available when compiling with the flags
  @ that enable them. We do not want to do that in the library (because we do not
  @ want the compiler to generate instructions that access those) but this is
  @ only accessed if the personality routine needs these registers. Use of
  @ these registers implies they are, actually, available on the target, so
  @ it's ok to execute.
  @ So, generate the instruction using the corresponding coprocessor mnemonic.
  vldmia r0, {d0-d15}
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMXEPv)
  vldmia r0, {d0-d15} @ fldmiax is deprecated in ARMv7+ and now behaves like vldmia
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPv3(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm12restoreVFPv3EPv)
  vldmia r0, {d16-d31}
  JMP(lr)

#if defined(__ARM_WMMX)

@
@ static void libunwind::Registers_arm::restoreiWMMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm12restoreiWMMXEPv)
  ldcl p1, cr0, [r0], #8  @ wldrd wR0, [r0], #8
  ldcl p1, cr1, [r0], #8  @ wldrd wR1, [r0], #8
  ldcl p1, cr2, [r0], #8  @ wldrd wR2, [r0], #8
  ldcl p1, cr3, [r0], #8  @ wldrd wR3, [r0], #8
  ldcl p1, cr4, [r0], #8  @ wldrd wR4, [r0], #8
  ldcl p1, cr5, [r0], #8  @ wldrd wR5, [r0], #8
  ldcl p1, cr6, [r0], #8  @ wldrd wR6, [r0], #8
  ldcl p1, cr7, [r0], #8  @ wldrd wR7, [r0], #8
  ldcl p1, cr8, [r0], #8  @ wldrd wR8, [r0], #8
  ldcl p1, cr9, [r0], #8  @ wldrd wR9, [r0], #8
  ldcl p1, cr10, [r0], #8  @ wldrd wR10, [r0], #8
  ldcl p1, cr11, [r0], #8  @ wldrd wR11, [r0], #8
  ldcl p1, cr12, [r0], #8  @ wldrd wR12, [r0], #8
  ldcl p1, cr13, [r0], #8  @ wldrd wR13, [r0], #8
  ldcl p1, cr14, [r0], #8  @ wldrd wR14, [r0], #8
  ldcl p1, cr15, [r0], #8  @ wldrd wR15, [r0], #8
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreiWMMXControl(unw_uint32_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreiWMMXControlEPj)
  ldc2 p1, cr8, [r0], #4  @ wldrw wCGR0, [r0], #4
  ldc2 p1, cr9, [r0], #4  @ wldrw wCGR1, [r0], #4
  ldc2 p1, cr10, [r0], #4  @ wldrw wCGR2, [r0], #4
  ldc2 p1, cr11, [r0], #4  @ wldrw wCGR3, [r0], #4
  JMP(lr)

#endif

#elif defined(__or1k__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind14Registers_or1k6jumptoEv)
#
# void libunwind::Registers_or1k::jumpto()
#
# On entry:
#  thread_state pointer is in r3
#
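# The loads below assume 4-byte slots, with register rN saved at offset 4 * N
# and the resume pc at offset 128 (loaded into r9, the link register, before
# the final jump).
#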

  # restore integral registers
  l.lwz     r0,  0(r3)
  l.lwz     r1,  4(r3)
  l.lwz     r2,  8(r3)
  # skip r3 for now
  l.lwz     r4, 16(r3)
  l.lwz     r5, 20(r3)
  l.lwz     r6, 24(r3)
  l.lwz     r7, 28(r3)
  l.lwz     r8, 32(r3)
  # skip r9
  l.lwz    r10, 40(r3)
  l.lwz    r11, 44(r3)
  l.lwz    r12, 48(r3)
  l.lwz    r13, 52(r3)
  l.lwz    r14, 56(r3)
  l.lwz    r15, 60(r3)
  l.lwz    r16, 64(r3)
  l.lwz    r17, 68(r3)
  l.lwz    r18, 72(r3)
  l.lwz    r19, 76(r3)
  l.lwz    r20, 80(r3)
  l.lwz    r21, 84(r3)
  l.lwz    r22, 88(r3)
  l.lwz    r23, 92(r3)
  l.lwz    r24, 96(r3)
  l.lwz    r25,100(r3)
  l.lwz    r26,104(r3)
  l.lwz    r27,108(r3)
  l.lwz    r28,112(r3)
  l.lwz    r29,116(r3)
  l.lwz    r30,120(r3)
  l.lwz    r31,124(r3)

  # load new pc into ra (this must happen before r3 is overwritten below)
  l.lwz    r9, 128(r3)

  # at last, restore r3
  l.lwz    r3,  12(r3)

  # jump to pc
  l.jr     r9
   l.nop

#elif defined(__hexagon__)
# On entry:
#  thread_state pointer is in r0
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind17Registers_hexagon6jumptoEv)
#
# void libunwind::Registers_hexagon::jumpto()
#
  r8 = memw(r0+#32)
  r9 = memw(r0+#36)
  r10 = memw(r0+#40)
  r11 = memw(r0+#44)

  r12 = memw(r0+#48)
  r13 = memw(r0+#52)
  r14 = memw(r0+#56)
  r15 = memw(r0+#60)

  r16 = memw(r0+#64)
  r17 = memw(r0+#68)
  r18 = memw(r0+#72)
  r19 = memw(r0+#76)

  r20 = memw(r0+#80)
  r21 = memw(r0+#84)
  r22 = memw(r0+#88)
  r23 = memw(r0+#92)

  r24 = memw(r0+#96)
  r25 = memw(r0+#100)
  r26 = memw(r0+#104)
  r27 = memw(r0+#108)

  r28 = memw(r0+#112)
  r29 = memw(r0+#116)
  r30 = memw(r0+#120)
  r31 = memw(r0+#132)

  r1 = memw(r0+#128)
  c4 = r1   // Predicate register
  r1 = memw(r0+#4)
  r0 = memw(r0)
  jumpr r31
#elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32

//
// void libunwind::Registers_mips_o32::jumpto()
//
// On entry:
//  thread state pointer is in a0 ($4)
//
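// The offsets used below assume 4-byte GPR slots at 4 * N, the resume pc at
// 4 * 32, hi/lo at 4 * 33 and 4 * 34, and (for hard-float builds) 8-byte FPR
// slots starting at offset 4 * 36; see Registers_mips_o32 in Registers.hpp
// for the authoritative layout.
//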
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind18Registers_mips_o326jumptoEv)
  .set push
  .set noat
  .set noreorder
  .set nomacro
#ifdef __mips_hard_float
#if __mips_fpr != 64
  ldc1  $f0, (4 * 36 + 8 * 0)($4)
  ldc1  $f2, (4 * 36 + 8 * 2)($4)
  ldc1  $f4, (4 * 36 + 8 * 4)($4)
  ldc1  $f6, (4 * 36 + 8 * 6)($4)
  ldc1  $f8, (4 * 36 + 8 * 8)($4)
  ldc1  $f10, (4 * 36 + 8 * 10)($4)
  ldc1  $f12, (4 * 36 + 8 * 12)($4)
  ldc1  $f14, (4 * 36 + 8 * 14)($4)
  ldc1  $f16, (4 * 36 + 8 * 16)($4)
  ldc1  $f18, (4 * 36 + 8 * 18)($4)
  ldc1  $f20, (4 * 36 + 8 * 20)($4)
  ldc1  $f22, (4 * 36 + 8 * 22)($4)
  ldc1  $f24, (4 * 36 + 8 * 24)($4)
  ldc1  $f26, (4 * 36 + 8 * 26)($4)
  ldc1  $f28, (4 * 36 + 8 * 28)($4)
  ldc1  $f30, (4 * 36 + 8 * 30)($4)
#else
  ldc1  $f0, (4 * 36 + 8 * 0)($4)
  ldc1  $f1, (4 * 36 + 8 * 1)($4)
  ldc1  $f2, (4 * 36 + 8 * 2)($4)
  ldc1  $f3, (4 * 36 + 8 * 3)($4)
  ldc1  $f4, (4 * 36 + 8 * 4)($4)
  ldc1  $f5, (4 * 36 + 8 * 5)($4)
  ldc1  $f6, (4 * 36 + 8 * 6)($4)
  ldc1  $f7, (4 * 36 + 8 * 7)($4)
  ldc1  $f8, (4 * 36 + 8 * 8)($4)
  ldc1  $f9, (4 * 36 + 8 * 9)($4)
  ldc1  $f10, (4 * 36 + 8 * 10)($4)
  ldc1  $f11, (4 * 36 + 8 * 11)($4)
  ldc1  $f12, (4 * 36 + 8 * 12)($4)
  ldc1  $f13, (4 * 36 + 8 * 13)($4)
  ldc1  $f14, (4 * 36 + 8 * 14)($4)
  ldc1  $f15, (4 * 36 + 8 * 15)($4)
  ldc1  $f16, (4 * 36 + 8 * 16)($4)
  ldc1  $f17, (4 * 36 + 8 * 17)($4)
  ldc1  $f18, (4 * 36 + 8 * 18)($4)
  ldc1  $f19, (4 * 36 + 8 * 19)($4)
  ldc1  $f20, (4 * 36 + 8 * 20)($4)
  ldc1  $f21, (4 * 36 + 8 * 21)($4)
  ldc1  $f22, (4 * 36 + 8 * 22)($4)
  ldc1  $f23, (4 * 36 + 8 * 23)($4)
  ldc1  $f24, (4 * 36 + 8 * 24)($4)
  ldc1  $f25, (4 * 36 + 8 * 25)($4)
  ldc1  $f26, (4 * 36 + 8 * 26)($4)
  ldc1  $f27, (4 * 36 + 8 * 27)($4)
  ldc1  $f28, (4 * 36 + 8 * 28)($4)
  ldc1  $f29, (4 * 36 + 8 * 29)($4)
  ldc1  $f30, (4 * 36 + 8 * 30)($4)
  ldc1  $f31, (4 * 36 + 8 * 31)($4)
#endif
#endif
  // restore hi and lo
  lw    $8, (4 * 33)($4)
  mthi  $8
  lw    $8, (4 * 34)($4)
  mtlo  $8
  // r0 is zero
  lw    $1, (4 * 1)($4)
  lw    $2, (4 * 2)($4)
  lw    $3, (4 * 3)($4)
  // skip a0 for now
  lw    $5, (4 * 5)($4)
  lw    $6, (4 * 6)($4)
  lw    $7, (4 * 7)($4)
  lw    $8, (4 * 8)($4)
  lw    $9, (4 * 9)($4)
  lw    $10, (4 * 10)($4)
  lw    $11, (4 * 11)($4)
  lw    $12, (4 * 12)($4)
  lw    $13, (4 * 13)($4)
  lw    $14, (4 * 14)($4)
  lw    $15, (4 * 15)($4)
  lw    $16, (4 * 16)($4)
  lw    $17, (4 * 17)($4)
  lw    $18, (4 * 18)($4)
  lw    $19, (4 * 19)($4)
  lw    $20, (4 * 20)($4)
  lw    $21, (4 * 21)($4)
  lw    $22, (4 * 22)($4)
  lw    $23, (4 * 23)($4)
  lw    $24, (4 * 24)($4)
  lw    $25, (4 * 25)($4)
  lw    $26, (4 * 26)($4)
  lw    $27, (4 * 27)($4)
  lw    $28, (4 * 28)($4)
  lw    $29, (4 * 29)($4)
  lw    $30, (4 * 30)($4)
  // load new pc into ra
  lw    $31, (4 * 32)($4)
  // jump to ra, load a0 in the delay slot
  jr    $31
  lw    $4, (4 * 4)($4)
  .set pop

#elif defined(__mips64)

//
// void libunwind::Registers_mips_newabi::jumpto()
//
// On entry:
//  thread state pointer is in a0 ($4)
//
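// Same scheme as the O32 variant above, but with 8-byte slots: GPR N at
// 8 * N, the resume pc at 8 * 32, hi/lo at 8 * 33 and 8 * 34, and FPRs
// starting at offset 8 * 35.
//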
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind21Registers_mips_newabi6jumptoEv)
  .set push
  .set noat
  .set noreorder
  .set nomacro
#ifdef __mips_hard_float
  ldc1  $f0, (8 * 35)($4)
  ldc1  $f1, (8 * 36)($4)
  ldc1  $f2, (8 * 37)($4)
  ldc1  $f3, (8 * 38)($4)
  ldc1  $f4, (8 * 39)($4)
  ldc1  $f5, (8 * 40)($4)
  ldc1  $f6, (8 * 41)($4)
  ldc1  $f7, (8 * 42)($4)
  ldc1  $f8, (8 * 43)($4)
  ldc1  $f9, (8 * 44)($4)
  ldc1  $f10, (8 * 45)($4)
  ldc1  $f11, (8 * 46)($4)
  ldc1  $f12, (8 * 47)($4)
  ldc1  $f13, (8 * 48)($4)
  ldc1  $f14, (8 * 49)($4)
  ldc1  $f15, (8 * 50)($4)
  ldc1  $f16, (8 * 51)($4)
  ldc1  $f17, (8 * 52)($4)
  ldc1  $f18, (8 * 53)($4)
  ldc1  $f19, (8 * 54)($4)
  ldc1  $f20, (8 * 55)($4)
  ldc1  $f21, (8 * 56)($4)
  ldc1  $f22, (8 * 57)($4)
  ldc1  $f23, (8 * 58)($4)
  ldc1  $f24, (8 * 59)($4)
  ldc1  $f25, (8 * 60)($4)
  ldc1  $f26, (8 * 61)($4)
  ldc1  $f27, (8 * 62)($4)
  ldc1  $f28, (8 * 63)($4)
  ldc1  $f29, (8 * 64)($4)
  ldc1  $f30, (8 * 65)($4)
  ldc1  $f31, (8 * 66)($4)
#endif
  // restore hi and lo
  ld    $8, (8 * 33)($4)
  mthi  $8
  ld    $8, (8 * 34)($4)
  mtlo  $8
  // r0 is zero
  ld    $1, (8 * 1)($4)
  ld    $2, (8 * 2)($4)
  ld    $3, (8 * 3)($4)
  // skip a0 for now
  ld    $5, (8 * 5)($4)
  ld    $6, (8 * 6)($4)
  ld    $7, (8 * 7)($4)
  ld    $8, (8 * 8)($4)
  ld    $9, (8 * 9)($4)
  ld    $10, (8 * 10)($4)
  ld    $11, (8 * 11)($4)
  ld    $12, (8 * 12)($4)
  ld    $13, (8 * 13)($4)
  ld    $14, (8 * 14)($4)
  ld    $15, (8 * 15)($4)
  ld    $16, (8 * 16)($4)
  ld    $17, (8 * 17)($4)
  ld    $18, (8 * 18)($4)
  ld    $19, (8 * 19)($4)
  ld    $20, (8 * 20)($4)
  ld    $21, (8 * 21)($4)
  ld    $22, (8 * 22)($4)
  ld    $23, (8 * 23)($4)
  ld    $24, (8 * 24)($4)
  ld    $25, (8 * 25)($4)
  ld    $26, (8 * 26)($4)
  ld    $27, (8 * 27)($4)
  ld    $28, (8 * 28)($4)
  ld    $29, (8 * 29)($4)
  ld    $30, (8 * 30)($4)
  // load new pc into ra
  ld    $31, (8 * 32)($4)
  // jump to ra, load a0 in the delay slot
  jr    $31
  ld    $4, (8 * 4)($4)
  .set pop

#elif defined(__sparc__)

//
// void libunwind::Registers_sparc::jumpto()
//
// On entry:
//  thread_state pointer is in o0
//
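// The "ta 3" below is the SPARC flush-windows software trap; it spills any
// cached register windows to the stack before the saved locals and ins are
// reloaded. The loads assume 4-byte slots, with the locals at offset 64, the
// ins at offset 96, and the resume address (placed in %o7) at offset 60.
//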
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_sparc6jumptoEv)
  ta 3
  ldd [%o0 + 64],  %l0
  ldd [%o0 + 72],  %l2
  ldd [%o0 + 80],  %l4
  ldd [%o0 + 88],  %l6
  ldd [%o0 + 96],  %i0
  ldd [%o0 + 104], %i2
  ldd [%o0 + 112], %i4
  ldd [%o0 + 120], %i6
  ld  [%o0 + 60],  %o7
  jmp %o7
   nop

#elif defined(__riscv) && __riscv_xlen == 64

//
// void libunwind::Registers_riscv::jumpto()
//
// On entry:
//  thread_state pointer is in a0
//
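// The offsets used below assume 8-byte slots, with the resume pc in slot 0,
// x1-x31 at 8 * N, and (when __riscv_flen == 64) f0-f31 starting at 8 * 32;
// see Registers_riscv in Registers.hpp for the authoritative layout.
//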
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_riscv6jumptoEv)
#if defined(__riscv_flen) && __riscv_flen == 64
  fld    f0, (8 * 32 + 8 * 0)(a0)
  fld    f1, (8 * 32 + 8 * 1)(a0)
  fld    f2, (8 * 32 + 8 * 2)(a0)
  fld    f3, (8 * 32 + 8 * 3)(a0)
  fld    f4, (8 * 32 + 8 * 4)(a0)
  fld    f5, (8 * 32 + 8 * 5)(a0)
  fld    f6, (8 * 32 + 8 * 6)(a0)
  fld    f7, (8 * 32 + 8 * 7)(a0)
  fld    f8, (8 * 32 + 8 * 8)(a0)
  fld    f9, (8 * 32 + 8 * 9)(a0)
  fld    f10, (8 * 32 + 8 * 10)(a0)
  fld    f11, (8 * 32 + 8 * 11)(a0)
  fld    f12, (8 * 32 + 8 * 12)(a0)
  fld    f13, (8 * 32 + 8 * 13)(a0)
  fld    f14, (8 * 32 + 8 * 14)(a0)
  fld    f15, (8 * 32 + 8 * 15)(a0)
  fld    f16, (8 * 32 + 8 * 16)(a0)
  fld    f17, (8 * 32 + 8 * 17)(a0)
  fld    f18, (8 * 32 + 8 * 18)(a0)
  fld    f19, (8 * 32 + 8 * 19)(a0)
  fld    f20, (8 * 32 + 8 * 20)(a0)
  fld    f21, (8 * 32 + 8 * 21)(a0)
  fld    f22, (8 * 32 + 8 * 22)(a0)
  fld    f23, (8 * 32 + 8 * 23)(a0)
  fld    f24, (8 * 32 + 8 * 24)(a0)
  fld    f25, (8 * 32 + 8 * 25)(a0)
  fld    f26, (8 * 32 + 8 * 26)(a0)
  fld    f27, (8 * 32 + 8 * 27)(a0)
  fld    f28, (8 * 32 + 8 * 28)(a0)
  fld    f29, (8 * 32 + 8 * 29)(a0)
  fld    f30, (8 * 32 + 8 * 30)(a0)
  fld    f31, (8 * 32 + 8 * 31)(a0)
#endif

  // x0 is zero
  ld    x1, (8 * 0)(a0) // restore pc into ra
  ld    x2, (8 * 2)(a0)
  ld    x3, (8 * 3)(a0)
  ld    x4, (8 * 4)(a0)
  ld    x5, (8 * 5)(a0)
  ld    x6, (8 * 6)(a0)
  ld    x7, (8 * 7)(a0)
  ld    x8, (8 * 8)(a0)
  ld    x9, (8 * 9)(a0)
  // skip a0 for now
  ld    x11, (8 * 11)(a0)
  ld    x12, (8 * 12)(a0)
  ld    x13, (8 * 13)(a0)
  ld    x14, (8 * 14)(a0)
  ld    x15, (8 * 15)(a0)
  ld    x16, (8 * 16)(a0)
  ld    x17, (8 * 17)(a0)
  ld    x18, (8 * 18)(a0)
  ld    x19, (8 * 19)(a0)
  ld    x20, (8 * 20)(a0)
  ld    x21, (8 * 21)(a0)
  ld    x22, (8 * 22)(a0)
  ld    x23, (8 * 23)(a0)
  ld    x24, (8 * 24)(a0)
  ld    x25, (8 * 25)(a0)
  ld    x26, (8 * 26)(a0)
  ld    x27, (8 * 27)(a0)
  ld    x28, (8 * 28)(a0)
  ld    x29, (8 * 29)(a0)
  ld    x30, (8 * 30)(a0)
  ld    x31, (8 * 31)(a0)
  ld    x10, (8 * 10)(a0)   // restore a0

  ret                       // jump to ra

#endif

#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */

NO_EXEC_STACK_DIRECTIVE
