//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "assembly.h"

  .text

#if !defined(__USING_SJLJ_EXCEPTIONS__)

#if defined(__i386__)
DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_x86_jumpto)
#
# extern "C" void __libunwind_Registers_x86_jumpto(Registers_x86 *);
#
# On entry:
#  +                       +
#  +-----------------------+
#  + thread_state pointer  +
#  +-----------------------+
#  + return address        +
#  +-----------------------+   <-- SP
#  +                       +

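# Approach (offsets are assumed to follow the Registers_x86 layout in
# Registers.hpp: eax at 0, ebx at 4, ..., esp at 28, eip at 40): stage the
# saved eax and eip just below the target esp, restore every other general
# register from the struct, switch to the target stack, then pop eax and
# jump through ecx.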
  _LIBUNWIND_CET_ENDBR
  movl   4(%esp), %eax
  # set up eax and ret on new stack location
  movl  28(%eax), %edx # edx holds new stack pointer
  subl  $8,%edx
  movl  %edx, 28(%eax)
  movl  0(%eax), %ebx
  movl  %ebx, 0(%edx)
  movl  40(%eax), %ebx
  movl  %ebx, 4(%edx)
  # we now have ret and eax pushed onto where new stack will be
  # restore all registers
  movl   4(%eax), %ebx
  movl   8(%eax), %ecx
  movl  12(%eax), %edx
  movl  16(%eax), %edi
  movl  20(%eax), %esi
  movl  24(%eax), %ebp
  movl  28(%eax), %esp
  # skip ss
  # skip eflags
  pop    %eax  # eax was already pushed on new stack
  pop    %ecx
  jmp    *%ecx
  # skip cs
  # skip ds
  # skip es
  # skip fs
  # skip gs

#elif defined(__x86_64__)

DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_x86_64_jumpto)
#
# extern "C" void __libunwind_Registers_x86_64_jumpto(Registers_x86_64 *);
#
#if defined(_WIN64)
# On entry, thread_state pointer is in rcx; move it into rdi
# to share restore code below. Since this routine restores and
# overwrites all registers, we can use the same registers for
# pointers and temporaries as on unix even though win64 normally
# mustn't clobber some of them.
  movq  %rcx, %rdi
#else
# On entry, thread_state pointer is in rdi
#endif

  _LIBUNWIND_CET_ENDBR
  movq  56(%rdi), %rax # rax holds new stack pointer
  subq  $16, %rax
  movq  %rax, 56(%rdi)
  movq  32(%rdi), %rbx  # store new rdi on new stack
  movq  %rbx, 0(%rax)
  movq  128(%rdi), %rbx # store new rip on new stack
  movq  %rbx, 8(%rax)
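  # The two quadwords staged above sit just below the target rsp so that,
  # once rsp is switched at the end, rdi and rip can simply be popped.
  # Offsets are assumed to follow the Registers_x86_64 layout in
  # Registers.hpp (rax at 0 ... rsp at 56, r8-r15 at 64-120, rip at 128).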
  # restore all registers
  movq    0(%rdi), %rax
  movq    8(%rdi), %rbx
  movq   16(%rdi), %rcx
  movq   24(%rdi), %rdx
  # restore rdi later
  movq   40(%rdi), %rsi
  movq   48(%rdi), %rbp
  # restore rsp later
  movq   64(%rdi), %r8
  movq   72(%rdi), %r9
  movq   80(%rdi), %r10
  movq   88(%rdi), %r11
  movq   96(%rdi), %r12
  movq  104(%rdi), %r13
  movq  112(%rdi), %r14
  movq  120(%rdi), %r15
  # skip rflags
  # skip cs
  # skip fs
  # skip gs

#if defined(_WIN64)
  movdqu 176(%rdi),%xmm0
  movdqu 192(%rdi),%xmm1
  movdqu 208(%rdi),%xmm2
  movdqu 224(%rdi),%xmm3
  movdqu 240(%rdi),%xmm4
  movdqu 256(%rdi),%xmm5
  movdqu 272(%rdi),%xmm6
  movdqu 288(%rdi),%xmm7
  movdqu 304(%rdi),%xmm8
  movdqu 320(%rdi),%xmm9
  movdqu 336(%rdi),%xmm10
  movdqu 352(%rdi),%xmm11
  movdqu 368(%rdi),%xmm12
  movdqu 384(%rdi),%xmm13
  movdqu 400(%rdi),%xmm14
  movdqu 416(%rdi),%xmm15
#endif
  movq  56(%rdi), %rsp  # cut back rsp to new location
  pop    %rdi      # rdi was saved here earlier
  pop    %rcx
  jmpq   *%rcx


#elif defined(__powerpc64__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_ppc646jumptoEv)
//
// void libunwind::Registers_ppc64::jumpto()
//
// On entry:
//  thread_state pointer is in r3
//

// load register (GPR)
#define PPC64_LR(n) \
  ld    n, (8 * (n + 2))(3)
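// (the 8 * (n + 2) displacement assumes the Registers_ppc64 layout in
// Registers.hpp, where two 8-byte special fields precede the GPR array,
// so GPR n lives at byte offset 8 * (n + 2) from the context pointer in r3)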

  // restore integral registers
  // skip r0 for now
  // skip r1 for now
  PPC64_LR(2)
  // skip r3 for now
  // skip r4 for now
  // skip r5 for now
  PPC64_LR(6)
  PPC64_LR(7)
  PPC64_LR(8)
  PPC64_LR(9)
  PPC64_LR(10)
  PPC64_LR(11)
  PPC64_LR(12)
  PPC64_LR(13)
  PPC64_LR(14)
  PPC64_LR(15)
  PPC64_LR(16)
  PPC64_LR(17)
  PPC64_LR(18)
  PPC64_LR(19)
  PPC64_LR(20)
  PPC64_LR(21)
  PPC64_LR(22)
  PPC64_LR(23)
  PPC64_LR(24)
  PPC64_LR(25)
  PPC64_LR(26)
  PPC64_LR(27)
  PPC64_LR(28)
  PPC64_LR(29)
  PPC64_LR(30)
  PPC64_LR(31)

#if defined(__VSX__)

  // restore VS registers
  // (note that this also restores floating point registers and V registers,
  // because part of VS is mapped to these registers)

  addi  4, 3, PPC64_OFFS_FP

// load VS register
#define PPC64_LVS(n)         \
  lxvd2x  n, 0, 4           ;\
  addi    4, 4, 16

  // restore the first 32 VS regs (and also all floating point regs)
  PPC64_LVS(0)
  PPC64_LVS(1)
  PPC64_LVS(2)
  PPC64_LVS(3)
  PPC64_LVS(4)
  PPC64_LVS(5)
  PPC64_LVS(6)
  PPC64_LVS(7)
  PPC64_LVS(8)
  PPC64_LVS(9)
  PPC64_LVS(10)
  PPC64_LVS(11)
  PPC64_LVS(12)
  PPC64_LVS(13)
  PPC64_LVS(14)
  PPC64_LVS(15)
  PPC64_LVS(16)
  PPC64_LVS(17)
  PPC64_LVS(18)
  PPC64_LVS(19)
  PPC64_LVS(20)
  PPC64_LVS(21)
  PPC64_LVS(22)
  PPC64_LVS(23)
  PPC64_LVS(24)
  PPC64_LVS(25)
  PPC64_LVS(26)
  PPC64_LVS(27)
  PPC64_LVS(28)
  PPC64_LVS(29)
  PPC64_LVS(30)
  PPC64_LVS(31)

  // use VRSAVE to conditionally restore the remaining VS regs,
  // which are where the V regs are mapped

  ld    5, PPC64_OFFS_VRSAVE(3)   // test VRsave
  cmpwi 5, 0
  beq   Lnovec

// conditionally load VS
#define PPC64_CLVS_BOTTOM(n)               \
  beq    Ldone##n                         ;\
  addi   4, 3, PPC64_OFFS_FP + n * 16     ;\
  lxvd2x n, 0, 4                          ;\
Ldone##n:

#define PPC64_CLVSl(n)                    \
  andis. 0, 5, (1 PPC_LEFT_SHIFT(47-n))  ;\
PPC64_CLVS_BOTTOM(n)

#define PPC64_CLVSh(n)                    \
  andi.  0, 5, (1 PPC_LEFT_SHIFT(63-n))  ;\
PPC64_CLVS_BOTTOM(n)

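// Each VRSAVE bit flags one live vector register, with v0 at the most
// significant bit of the 32-bit mask.  VS register n maps to v(n-32), so
// the "l" macros test the upper half of the mask with andis. and the "h"
// macros test the lower half with andi.; registers whose bit is clear are
// left untouched.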
  PPC64_CLVSl(32)
  PPC64_CLVSl(33)
  PPC64_CLVSl(34)
  PPC64_CLVSl(35)
  PPC64_CLVSl(36)
  PPC64_CLVSl(37)
  PPC64_CLVSl(38)
  PPC64_CLVSl(39)
  PPC64_CLVSl(40)
  PPC64_CLVSl(41)
  PPC64_CLVSl(42)
  PPC64_CLVSl(43)
  PPC64_CLVSl(44)
  PPC64_CLVSl(45)
  PPC64_CLVSl(46)
  PPC64_CLVSl(47)
  PPC64_CLVSh(48)
  PPC64_CLVSh(49)
  PPC64_CLVSh(50)
  PPC64_CLVSh(51)
  PPC64_CLVSh(52)
  PPC64_CLVSh(53)
  PPC64_CLVSh(54)
  PPC64_CLVSh(55)
  PPC64_CLVSh(56)
  PPC64_CLVSh(57)
  PPC64_CLVSh(58)
  PPC64_CLVSh(59)
  PPC64_CLVSh(60)
  PPC64_CLVSh(61)
  PPC64_CLVSh(62)
  PPC64_CLVSh(63)

#else

// load FP register
#define PPC64_LF(n) \
  lfd   n, (PPC64_OFFS_FP + n * 16)(3)

  // restore float registers
  PPC64_LF(0)
  PPC64_LF(1)
  PPC64_LF(2)
  PPC64_LF(3)
  PPC64_LF(4)
  PPC64_LF(5)
  PPC64_LF(6)
  PPC64_LF(7)
  PPC64_LF(8)
  PPC64_LF(9)
  PPC64_LF(10)
  PPC64_LF(11)
  PPC64_LF(12)
  PPC64_LF(13)
  PPC64_LF(14)
  PPC64_LF(15)
  PPC64_LF(16)
  PPC64_LF(17)
  PPC64_LF(18)
  PPC64_LF(19)
  PPC64_LF(20)
  PPC64_LF(21)
  PPC64_LF(22)
  PPC64_LF(23)
  PPC64_LF(24)
  PPC64_LF(25)
  PPC64_LF(26)
  PPC64_LF(27)
  PPC64_LF(28)
  PPC64_LF(29)
  PPC64_LF(30)
  PPC64_LF(31)

#if defined(__ALTIVEC__)
  // restore vector registers if any are in use
  ld    5, PPC64_OFFS_VRSAVE(3)   // test VRsave
  cmpwi 5, 0
  beq   Lnovec

  subi  4, 1, 16
  // r4 is now a 16-byte aligned pointer into the red zone
  // the _vectorScalarRegisters may not be 16-byte aligned
  // so copy via red zone temp buffer
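  // (lvx can only load from a 16-byte aligned address, so each vector is
  // first copied with 8-byte ld/std into this aligned red-zone slot and
  // then loaded into the target vector register from there)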

#define PPC64_CLV_UNALIGNED_BOTTOM(n)            \
  beq    Ldone##n                               ;\
  ld     0, (PPC64_OFFS_V + n * 16)(3)          ;\
  std    0, 0(4)                                ;\
  ld     0, (PPC64_OFFS_V + n * 16 + 8)(3)      ;\
  std    0, 8(4)                                ;\
  lvx    n, 0, 4                                ;\
Ldone  ## n:

#define PPC64_CLV_UNALIGNEDl(n)                 \
  andis. 0, 5, (1 PPC_LEFT_SHIFT(15-n))        ;\
PPC64_CLV_UNALIGNED_BOTTOM(n)

#define PPC64_CLV_UNALIGNEDh(n)                \
  andi.  0, 5, (1 PPC_LEFT_SHIFT(31-n))       ;\
PPC64_CLV_UNALIGNED_BOTTOM(n)

  PPC64_CLV_UNALIGNEDl(0)
  PPC64_CLV_UNALIGNEDl(1)
  PPC64_CLV_UNALIGNEDl(2)
  PPC64_CLV_UNALIGNEDl(3)
  PPC64_CLV_UNALIGNEDl(4)
  PPC64_CLV_UNALIGNEDl(5)
  PPC64_CLV_UNALIGNEDl(6)
  PPC64_CLV_UNALIGNEDl(7)
  PPC64_CLV_UNALIGNEDl(8)
  PPC64_CLV_UNALIGNEDl(9)
  PPC64_CLV_UNALIGNEDl(10)
  PPC64_CLV_UNALIGNEDl(11)
  PPC64_CLV_UNALIGNEDl(12)
  PPC64_CLV_UNALIGNEDl(13)
  PPC64_CLV_UNALIGNEDl(14)
  PPC64_CLV_UNALIGNEDl(15)
  PPC64_CLV_UNALIGNEDh(16)
  PPC64_CLV_UNALIGNEDh(17)
  PPC64_CLV_UNALIGNEDh(18)
  PPC64_CLV_UNALIGNEDh(19)
  PPC64_CLV_UNALIGNEDh(20)
  PPC64_CLV_UNALIGNEDh(21)
  PPC64_CLV_UNALIGNEDh(22)
  PPC64_CLV_UNALIGNEDh(23)
  PPC64_CLV_UNALIGNEDh(24)
  PPC64_CLV_UNALIGNEDh(25)
  PPC64_CLV_UNALIGNEDh(26)
  PPC64_CLV_UNALIGNEDh(27)
  PPC64_CLV_UNALIGNEDh(28)
  PPC64_CLV_UNALIGNEDh(29)
  PPC64_CLV_UNALIGNEDh(30)
  PPC64_CLV_UNALIGNEDh(31)

#endif
#endif

Lnovec:
  ld    0, PPC64_OFFS_CR(3)
  mtcr  0
  ld    0, PPC64_OFFS_SRR0(3)
  mtctr 0

  PPC64_LR(0)
  PPC64_LR(5)
  PPC64_LR(4)
  PPC64_LR(1)
  PPC64_LR(3)
  bctr

#elif defined(__ppc__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_ppc6jumptoEv)
//
// void libunwind::Registers_ppc::jumpto()
//
// On entry:
//  thread_state pointer is in r3
//

  // restore integral registers
  // skip r0 for now
  // skip r1 for now
  lwz     2,  16(3)
  // skip r3 for now
  // skip r4 for now
  // skip r5 for now
  lwz     6,  32(3)
  lwz     7,  36(3)
  lwz     8,  40(3)
  lwz     9,  44(3)
  lwz     10, 48(3)
  lwz     11, 52(3)
  lwz     12, 56(3)
  lwz     13, 60(3)
  lwz     14, 64(3)
  lwz     15, 68(3)
  lwz     16, 72(3)
  lwz     17, 76(3)
  lwz     18, 80(3)
  lwz     19, 84(3)
  lwz     20, 88(3)
  lwz     21, 92(3)
  lwz     22, 96(3)
  lwz     23,100(3)
  lwz     24,104(3)
  lwz     25,108(3)
  lwz     26,112(3)
  lwz     27,116(3)
  lwz     28,120(3)
  lwz     29,124(3)
  lwz     30,128(3)
  lwz     31,132(3)

#ifndef __NO_FPRS__
  // restore float registers
  lfd     0, 160(3)
  lfd     1, 168(3)
  lfd     2, 176(3)
  lfd     3, 184(3)
  lfd     4, 192(3)
  lfd     5, 200(3)
  lfd     6, 208(3)
  lfd     7, 216(3)
  lfd     8, 224(3)
  lfd     9, 232(3)
  lfd     10,240(3)
  lfd     11,248(3)
  lfd     12,256(3)
  lfd     13,264(3)
  lfd     14,272(3)
  lfd     15,280(3)
  lfd     16,288(3)
  lfd     17,296(3)
  lfd     18,304(3)
  lfd     19,312(3)
  lfd     20,320(3)
  lfd     21,328(3)
  lfd     22,336(3)
  lfd     23,344(3)
  lfd     24,352(3)
  lfd     25,360(3)
  lfd     26,368(3)
  lfd     27,376(3)
  lfd     28,384(3)
  lfd     29,392(3)
  lfd     30,400(3)
  lfd     31,408(3)
#endif

#if defined(__ALTIVEC__)
  // restore vector registers if any are in use
  lwz     5, 156(3)       // test VRsave
  cmpwi   5, 0
  beq     Lnovec

  subi    4, 1, 16
  rlwinm  4, 4, 0, 0, 27  // mask low 4-bits
  // r4 is now a 16-byte aligned pointer into the red zone
  // the _vectorRegisters may not be 16-byte aligned so copy via red zone temp buffer


#define LOAD_VECTOR_UNALIGNEDl(_index)          \
  andis.  0, 5, (1 PPC_LEFT_SHIFT(15-_index)) SEPARATOR \
  beq     Ldone ## _index             SEPARATOR \
  lwz     0, 424+_index*16(3)         SEPARATOR \
  stw     0, 0(%r4)                   SEPARATOR \
  lwz     0, 424+_index*16+4(%r3)     SEPARATOR \
  stw     0, 4(%r4)                   SEPARATOR \
  lwz     0, 424+_index*16+8(%r3)     SEPARATOR \
  stw     0, 8(%r4)                   SEPARATOR \
  lwz     0, 424+_index*16+12(%r3)    SEPARATOR \
  stw     0, 12(%r4)                  SEPARATOR \
  lvx     _index, 0, 4                SEPARATOR \
  Ldone ## _index:

#define LOAD_VECTOR_UNALIGNEDh(_index)          \
  andi.   0, 5, (1 PPC_LEFT_SHIFT(31-_index)) SEPARATOR \
  beq     Ldone ## _index             SEPARATOR \
  lwz     0, 424+_index*16(3)         SEPARATOR \
  stw     0, 0(4)                     SEPARATOR \
  lwz     0, 424+_index*16+4(3)       SEPARATOR \
  stw     0, 4(4)                     SEPARATOR \
  lwz     0, 424+_index*16+8(3)       SEPARATOR \
  stw     0, 8(%r4)                   SEPARATOR \
  lwz     0, 424+_index*16+12(3)      SEPARATOR \
  stw     0, 12(4)                    SEPARATOR \
  lvx     _index, 0, 4                SEPARATOR \
  Ldone ## _index:


  LOAD_VECTOR_UNALIGNEDl(0)
  LOAD_VECTOR_UNALIGNEDl(1)
  LOAD_VECTOR_UNALIGNEDl(2)
  LOAD_VECTOR_UNALIGNEDl(3)
  LOAD_VECTOR_UNALIGNEDl(4)
  LOAD_VECTOR_UNALIGNEDl(5)
  LOAD_VECTOR_UNALIGNEDl(6)
  LOAD_VECTOR_UNALIGNEDl(7)
  LOAD_VECTOR_UNALIGNEDl(8)
  LOAD_VECTOR_UNALIGNEDl(9)
  LOAD_VECTOR_UNALIGNEDl(10)
  LOAD_VECTOR_UNALIGNEDl(11)
  LOAD_VECTOR_UNALIGNEDl(12)
  LOAD_VECTOR_UNALIGNEDl(13)
  LOAD_VECTOR_UNALIGNEDl(14)
  LOAD_VECTOR_UNALIGNEDl(15)
  LOAD_VECTOR_UNALIGNEDh(16)
  LOAD_VECTOR_UNALIGNEDh(17)
  LOAD_VECTOR_UNALIGNEDh(18)
  LOAD_VECTOR_UNALIGNEDh(19)
  LOAD_VECTOR_UNALIGNEDh(20)
  LOAD_VECTOR_UNALIGNEDh(21)
  LOAD_VECTOR_UNALIGNEDh(22)
  LOAD_VECTOR_UNALIGNEDh(23)
  LOAD_VECTOR_UNALIGNEDh(24)
  LOAD_VECTOR_UNALIGNEDh(25)
  LOAD_VECTOR_UNALIGNEDh(26)
  LOAD_VECTOR_UNALIGNEDh(27)
  LOAD_VECTOR_UNALIGNEDh(28)
  LOAD_VECTOR_UNALIGNEDh(29)
  LOAD_VECTOR_UNALIGNEDh(30)
  LOAD_VECTOR_UNALIGNEDh(31)
#endif

Lnovec:
  lwz     0, 136(3)   // __cr
  mtcr    0
  lwz     0, 148(3)   // __ctr
  mtctr   0
  lwz     0,   0(3)   // __ssr0
  mtctr   0
  lwz     0,   8(3)   // do r0 now
  lwz     5,  28(3)   // do r5 now
  lwz     4,  24(3)   // do r4 now
  lwz     1,  12(3)   // do sp now
  lwz     3,  20(3)   // do r3 last
  bctr

#elif defined(__aarch64__)

//
// extern "C" void __libunwind_Registers_arm64_jumpto(Registers_arm64 *);
//
// On entry:
//  thread_state pointer is in x0
//
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_arm64_jumpto)
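  // Offsets are assumed to follow the Registers_arm64 layout in
  // Registers.hpp: x0-x30 as 8-byte slots starting at 0x000, sp at 0x0F8,
  // pc at 0x100, then the 64-bit d registers starting at 0x110.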
  // skip restore of x0,x1 for now
  ldp    x2, x3,  [x0, #0x010]
  ldp    x4, x5,  [x0, #0x020]
  ldp    x6, x7,  [x0, #0x030]
  ldp    x8, x9,  [x0, #0x040]
  ldp    x10,x11, [x0, #0x050]
  ldp    x12,x13, [x0, #0x060]
  ldp    x14,x15, [x0, #0x070]
  // x16 and x17 were clobbered by the call into the unwinder, so no point in
  // restoring them.
  ldp    x18,x19, [x0, #0x090]
  ldp    x20,x21, [x0, #0x0A0]
  ldp    x22,x23, [x0, #0x0B0]
  ldp    x24,x25, [x0, #0x0C0]
  ldp    x26,x27, [x0, #0x0D0]
  ldp    x28,x29, [x0, #0x0E0]
  ldr    x30,     [x0, #0x100]  // restore pc into lr

  ldp    d0, d1,  [x0, #0x110]
  ldp    d2, d3,  [x0, #0x120]
  ldp    d4, d5,  [x0, #0x130]
  ldp    d6, d7,  [x0, #0x140]
  ldp    d8, d9,  [x0, #0x150]
  ldp    d10,d11, [x0, #0x160]
  ldp    d12,d13, [x0, #0x170]
  ldp    d14,d15, [x0, #0x180]
  ldp    d16,d17, [x0, #0x190]
  ldp    d18,d19, [x0, #0x1A0]
  ldp    d20,d21, [x0, #0x1B0]
  ldp    d22,d23, [x0, #0x1C0]
  ldp    d24,d25, [x0, #0x1D0]
  ldp    d26,d27, [x0, #0x1E0]
  ldp    d28,d29, [x0, #0x1F0]
  ldr    d30,     [x0, #0x200]
  ldr    d31,     [x0, #0x208]

  // Finally, restore sp. This must be done after the last read from the
  // context struct, because it is allocated on the stack, and an exception
  // could clobber the de-allocated portion of the stack after sp has been
  // restored.
  ldr    x16,     [x0, #0x0F8]
  ldp    x0, x1,  [x0, #0x000]  // restore x0,x1
  mov    sp,x16                 // restore sp
  ret    x30                    // jump to pc

#elif defined(__arm__) && !defined(__APPLE__)

#if !defined(__ARM_ARCH_ISA_ARM)
#if (__ARM_ARCH_ISA_THUMB == 2)
  .syntax unified
#endif
  .thumb
#endif

@
@ void libunwind::Registers_arm::restoreCoreAndJumpTo()
@
@ On entry:
@  thread_state pointer is in r0
@
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm20restoreCoreAndJumpToEv)
#if !defined(__ARM_ARCH_ISA_ARM) && __ARM_ARCH_ISA_THUMB == 1
  @ r8-r11: ldm into r1-r4, then mov to r8-r11
  adds r0, #0x20
  ldm r0!, {r1-r4}
  subs r0, #0x30
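  @ (the ldm above advanced r0 by 16 bytes past the r8-r11 slots, so
  @ subtracting 0x30 undoes that and the earlier 0x20 adjustment, leaving
  @ r0 pointing back at the start of the register array)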
  mov r8, r1
  mov r9, r2
  mov r10, r3
  mov r11, r4
  @ r12 does not need loading, it is the intra-procedure-call scratch register
  ldr r2, [r0, #0x34]
  ldr r3, [r0, #0x3c]
  mov sp, r2
  mov lr, r3         @ restore pc into lr
  ldm r0, {r0-r7}
#else
  @ Use lr as base so that r0 can be restored.
  mov lr, r0
  @ 32bit thumb-2 restrictions for ldm:
  @ . the sp (r13) cannot be in the list
  @ . the pc (r15) and lr (r14) cannot both be in the list in an LDM instruction
  ldm lr, {r0-r12}
  ldr sp, [lr, #52]
  ldr lr, [lr, #60]  @ restore pc into lr
#endif
#if defined(__ARM_FEATURE_BTI_DEFAULT) && !defined(__ARM_ARCH_ISA_ARM)
  // 'bx' is not BTI setting when used with lr, therefore r12 is used instead
  mov r12, lr
  JMP(r12)
#else
  JMP(lr)
#endif

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMD(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMDEPv)
  @ VFP and iwMMX instructions are only available when compiling with the flags
  @ that enable them. We do not want to do that in the library (because we do not
  @ want the compiler to generate instructions that access those) but this is
  @ only accessed if the personality routine needs these registers. Use of
  @ these registers implies they are, actually, available on the target, so
  @ it's ok to execute.
  @ So, generate the instruction using the corresponding coprocessor mnemonic.
  vldmia r0, {d0-d15}
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMXEPv)
  vldmia r0, {d0-d15} @ fldmiax is deprecated in ARMv7+ and now behaves like vldmia
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPv3(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm12restoreVFPv3EPv)
  vldmia r0, {d16-d31}
  JMP(lr)

#if defined(__ARM_WMMX)

@
@ static void libunwind::Registers_arm::restoreiWMMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm12restoreiWMMXEPv)
  ldcl p1, cr0, [r0], #8  @ wldrd wR0, [r0], #8
  ldcl p1, cr1, [r0], #8  @ wldrd wR1, [r0], #8
  ldcl p1, cr2, [r0], #8  @ wldrd wR2, [r0], #8
  ldcl p1, cr3, [r0], #8  @ wldrd wR3, [r0], #8
  ldcl p1, cr4, [r0], #8  @ wldrd wR4, [r0], #8
  ldcl p1, cr5, [r0], #8  @ wldrd wR5, [r0], #8
  ldcl p1, cr6, [r0], #8  @ wldrd wR6, [r0], #8
  ldcl p1, cr7, [r0], #8  @ wldrd wR7, [r0], #8
  ldcl p1, cr8, [r0], #8  @ wldrd wR8, [r0], #8
  ldcl p1, cr9, [r0], #8  @ wldrd wR9, [r0], #8
  ldcl p1, cr10, [r0], #8  @ wldrd wR10, [r0], #8
  ldcl p1, cr11, [r0], #8  @ wldrd wR11, [r0], #8
  ldcl p1, cr12, [r0], #8  @ wldrd wR12, [r0], #8
  ldcl p1, cr13, [r0], #8  @ wldrd wR13, [r0], #8
  ldcl p1, cr14, [r0], #8  @ wldrd wR14, [r0], #8
  ldcl p1, cr15, [r0], #8  @ wldrd wR15, [r0], #8
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreiWMMXControl(unw_uint32_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreiWMMXControlEPj)
  ldc2 p1, cr8, [r0], #4  @ wldrw wCGR0, [r0], #4
  ldc2 p1, cr9, [r0], #4  @ wldrw wCGR1, [r0], #4
  ldc2 p1, cr10, [r0], #4  @ wldrw wCGR2, [r0], #4
  ldc2 p1, cr11, [r0], #4  @ wldrw wCGR3, [r0], #4
  JMP(lr)

#endif

#elif defined(__or1k__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind14Registers_or1k6jumptoEv)
#
# void libunwind::Registers_or1k::jumpto()
#
# On entry:
#  thread_state pointer is in r3
#

  # restore integral registers
  l.lwz     r0,  0(r3)
  l.lwz     r1,  4(r3)
  l.lwz     r2,  8(r3)
  # skip r3 for now
  l.lwz     r4, 16(r3)
  l.lwz     r5, 20(r3)
  l.lwz     r6, 24(r3)
  l.lwz     r7, 28(r3)
  l.lwz     r8, 32(r3)
  # skip r9
  l.lwz    r10, 40(r3)
  l.lwz    r11, 44(r3)
  l.lwz    r12, 48(r3)
  l.lwz    r13, 52(r3)
  l.lwz    r14, 56(r3)
  l.lwz    r15, 60(r3)
  l.lwz    r16, 64(r3)
  l.lwz    r17, 68(r3)
  l.lwz    r18, 72(r3)
  l.lwz    r19, 76(r3)
  l.lwz    r20, 80(r3)
  l.lwz    r21, 84(r3)
  l.lwz    r22, 88(r3)
  l.lwz    r23, 92(r3)
  l.lwz    r24, 96(r3)
  l.lwz    r25,100(r3)
  l.lwz    r26,104(r3)
  l.lwz    r27,108(r3)
  l.lwz    r28,112(r3)
  l.lwz    r29,116(r3)
  l.lwz    r30,120(r3)
  l.lwz    r31,124(r3)

  # load new pc into ra
  l.lwz    r9, 128(r3)

  # at last, restore r3
  l.lwz    r3,  12(r3)

  # jump to pc
  l.jr     r9
   l.nop

#elif defined(__hexagon__)
# On entry:
#  thread_state pointer is in r0
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind17Registers_hexagon6jumptoEv)
#
# void libunwind::Registers_hexagon::jumpto()
#
  r8 = memw(r0+#32)
  r9 = memw(r0+#36)
  r10 = memw(r0+#40)
  r11 = memw(r0+#44)

  r12 = memw(r0+#48)
  r13 = memw(r0+#52)
  r14 = memw(r0+#56)
  r15 = memw(r0+#60)

  r16 = memw(r0+#64)
  r17 = memw(r0+#68)
  r18 = memw(r0+#72)
  r19 = memw(r0+#76)

  r20 = memw(r0+#80)
  r21 = memw(r0+#84)
  r22 = memw(r0+#88)
  r23 = memw(r0+#92)

  r24 = memw(r0+#96)
  r25 = memw(r0+#100)
  r26 = memw(r0+#104)
  r27 = memw(r0+#108)

  r28 = memw(r0+#112)
  r29 = memw(r0+#116)
  r30 = memw(r0+#120)
  r31 = memw(r0+#132)

  r1 = memw(r0+#128)
  c4 = r1   // Predicate register
  r1 = memw(r0+#4)
  r0 = memw(r0)
  jumpr r31
#elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32

//
// void libunwind::Registers_mips_o32::jumpto()
//
// On entry:
//  thread state pointer is in a0 ($4)
//
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind18Registers_mips_o326jumptoEv)
  .set push
  .set noat
  .set noreorder
  .set nomacro
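  // The FP loads below assume the Registers_mips_o32 layout: 36 32-bit
  // slots of integer and special state come first, so the FP save area
  // starts at byte offset 4 * 36 with one 8-byte slot per register.  When
  // __mips_fpr != 64 the FPRs pair up, so reloading the even-numbered
  // doubles also refills the odd halves.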
#ifdef __mips_hard_float
#if __mips_fpr != 64
  ldc1  $f0, (4 * 36 + 8 * 0)($4)
  ldc1  $f2, (4 * 36 + 8 * 2)($4)
  ldc1  $f4, (4 * 36 + 8 * 4)($4)
  ldc1  $f6, (4 * 36 + 8 * 6)($4)
  ldc1  $f8, (4 * 36 + 8 * 8)($4)
  ldc1  $f10, (4 * 36 + 8 * 10)($4)
  ldc1  $f12, (4 * 36 + 8 * 12)($4)
  ldc1  $f14, (4 * 36 + 8 * 14)($4)
  ldc1  $f16, (4 * 36 + 8 * 16)($4)
  ldc1  $f18, (4 * 36 + 8 * 18)($4)
  ldc1  $f20, (4 * 36 + 8 * 20)($4)
  ldc1  $f22, (4 * 36 + 8 * 22)($4)
  ldc1  $f24, (4 * 36 + 8 * 24)($4)
  ldc1  $f26, (4 * 36 + 8 * 26)($4)
  ldc1  $f28, (4 * 36 + 8 * 28)($4)
  ldc1  $f30, (4 * 36 + 8 * 30)($4)
#else
  ldc1  $f0, (4 * 36 + 8 * 0)($4)
  ldc1  $f1, (4 * 36 + 8 * 1)($4)
  ldc1  $f2, (4 * 36 + 8 * 2)($4)
  ldc1  $f3, (4 * 36 + 8 * 3)($4)
  ldc1  $f4, (4 * 36 + 8 * 4)($4)
  ldc1  $f5, (4 * 36 + 8 * 5)($4)
  ldc1  $f6, (4 * 36 + 8 * 6)($4)
  ldc1  $f7, (4 * 36 + 8 * 7)($4)
  ldc1  $f8, (4 * 36 + 8 * 8)($4)
  ldc1  $f9, (4 * 36 + 8 * 9)($4)
  ldc1  $f10, (4 * 36 + 8 * 10)($4)
  ldc1  $f11, (4 * 36 + 8 * 11)($4)
  ldc1  $f12, (4 * 36 + 8 * 12)($4)
  ldc1  $f13, (4 * 36 + 8 * 13)($4)
  ldc1  $f14, (4 * 36 + 8 * 14)($4)
  ldc1  $f15, (4 * 36 + 8 * 15)($4)
  ldc1  $f16, (4 * 36 + 8 * 16)($4)
  ldc1  $f17, (4 * 36 + 8 * 17)($4)
  ldc1  $f18, (4 * 36 + 8 * 18)($4)
  ldc1  $f19, (4 * 36 + 8 * 19)($4)
  ldc1  $f20, (4 * 36 + 8 * 20)($4)
  ldc1  $f21, (4 * 36 + 8 * 21)($4)
  ldc1  $f22, (4 * 36 + 8 * 22)($4)
  ldc1  $f23, (4 * 36 + 8 * 23)($4)
  ldc1  $f24, (4 * 36 + 8 * 24)($4)
  ldc1  $f25, (4 * 36 + 8 * 25)($4)
  ldc1  $f26, (4 * 36 + 8 * 26)($4)
  ldc1  $f27, (4 * 36 + 8 * 27)($4)
  ldc1  $f28, (4 * 36 + 8 * 28)($4)
  ldc1  $f29, (4 * 36 + 8 * 29)($4)
  ldc1  $f30, (4 * 36 + 8 * 30)($4)
  ldc1  $f31, (4 * 36 + 8 * 31)($4)
#endif
#endif
  // restore hi and lo
  lw    $8, (4 * 33)($4)
  mthi  $8
  lw    $8, (4 * 34)($4)
  mtlo  $8
  // r0 is zero
  lw    $1, (4 * 1)($4)
  lw    $2, (4 * 2)($4)
  lw    $3, (4 * 3)($4)
  // skip a0 for now
  lw    $5, (4 * 5)($4)
  lw    $6, (4 * 6)($4)
  lw    $7, (4 * 7)($4)
  lw    $8, (4 * 8)($4)
  lw    $9, (4 * 9)($4)
  lw    $10, (4 * 10)($4)
  lw    $11, (4 * 11)($4)
  lw    $12, (4 * 12)($4)
  lw    $13, (4 * 13)($4)
  lw    $14, (4 * 14)($4)
  lw    $15, (4 * 15)($4)
  lw    $16, (4 * 16)($4)
  lw    $17, (4 * 17)($4)
  lw    $18, (4 * 18)($4)
  lw    $19, (4 * 19)($4)
  lw    $20, (4 * 20)($4)
  lw    $21, (4 * 21)($4)
  lw    $22, (4 * 22)($4)
  lw    $23, (4 * 23)($4)
  lw    $24, (4 * 24)($4)
  lw    $25, (4 * 25)($4)
  lw    $26, (4 * 26)($4)
  lw    $27, (4 * 27)($4)
  lw    $28, (4 * 28)($4)
  lw    $29, (4 * 29)($4)
  lw    $30, (4 * 30)($4)
  // load new pc into ra
  lw    $31, (4 * 32)($4)
  // jump to ra, load a0 in the delay slot
  jr    $31
  lw    $4, (4 * 4)($4)
  .set pop

#elif defined(__mips64)

//
// void libunwind::Registers_mips_newabi::jumpto()
//
// On entry:
//  thread state pointer is in a0 ($4)
//
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind21Registers_mips_newabi6jumptoEv)
  .set push
  .set noat
  .set noreorder
  .set nomacro
#ifdef __mips_hard_float
  ldc1  $f0, (8 * 35)($4)
  ldc1  $f1, (8 * 36)($4)
  ldc1  $f2, (8 * 37)($4)
  ldc1  $f3, (8 * 38)($4)
  ldc1  $f4, (8 * 39)($4)
  ldc1  $f5, (8 * 40)($4)
  ldc1  $f6, (8 * 41)($4)
  ldc1  $f7, (8 * 42)($4)
  ldc1  $f8, (8 * 43)($4)
  ldc1  $f9, (8 * 44)($4)
  ldc1  $f10, (8 * 45)($4)
  ldc1  $f11, (8 * 46)($4)
  ldc1  $f12, (8 * 47)($4)
  ldc1  $f13, (8 * 48)($4)
  ldc1  $f14, (8 * 49)($4)
  ldc1  $f15, (8 * 50)($4)
  ldc1  $f16, (8 * 51)($4)
  ldc1  $f17, (8 * 52)($4)
  ldc1  $f18, (8 * 53)($4)
  ldc1  $f19, (8 * 54)($4)
  ldc1  $f20, (8 * 55)($4)
  ldc1  $f21, (8 * 56)($4)
  ldc1  $f22, (8 * 57)($4)
  ldc1  $f23, (8 * 58)($4)
  ldc1  $f24, (8 * 59)($4)
  ldc1  $f25, (8 * 60)($4)
  ldc1  $f26, (8 * 61)($4)
  ldc1  $f27, (8 * 62)($4)
  ldc1  $f28, (8 * 63)($4)
  ldc1  $f29, (8 * 64)($4)
  ldc1  $f30, (8 * 65)($4)
  ldc1  $f31, (8 * 66)($4)
#endif
  // restore hi and lo
  ld    $8, (8 * 33)($4)
  mthi  $8
  ld    $8, (8 * 34)($4)
  mtlo  $8
  // r0 is zero
  ld    $1, (8 * 1)($4)
  ld    $2, (8 * 2)($4)
  ld    $3, (8 * 3)($4)
  // skip a0 for now
  ld    $5, (8 * 5)($4)
  ld    $6, (8 * 6)($4)
  ld    $7, (8 * 7)($4)
  ld    $8, (8 * 8)($4)
  ld    $9, (8 * 9)($4)
  ld    $10, (8 * 10)($4)
  ld    $11, (8 * 11)($4)
  ld    $12, (8 * 12)($4)
  ld    $13, (8 * 13)($4)
  ld    $14, (8 * 14)($4)
  ld    $15, (8 * 15)($4)
  ld    $16, (8 * 16)($4)
  ld    $17, (8 * 17)($4)
  ld    $18, (8 * 18)($4)
  ld    $19, (8 * 19)($4)
  ld    $20, (8 * 20)($4)
  ld    $21, (8 * 21)($4)
  ld    $22, (8 * 22)($4)
  ld    $23, (8 * 23)($4)
  ld    $24, (8 * 24)($4)
  ld    $25, (8 * 25)($4)
  ld    $26, (8 * 26)($4)
  ld    $27, (8 * 27)($4)
  ld    $28, (8 * 28)($4)
  ld    $29, (8 * 29)($4)
  ld    $30, (8 * 30)($4)
  // load new pc into ra
  ld    $31, (8 * 32)($4)
  // jump to ra, load a0 in the delay slot
  jr    $31
  ld    $4, (8 * 4)($4)
  .set pop

#elif defined(__sparc__)

//
// void libunwind::Registers_sparc::jumpto()
//
// On entry:
//  thread_state pointer is in o0
//
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_sparc6jumptoEv)
  ta 3
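  // ("ta 3" is conventionally the flush-windows software trap
  // (ST_FLUSH_WINDOWS); it forces the other register windows out to the
  // stack so the locals and ins reloaded below take effect cleanly)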
  ldd [%o0 + 64],  %l0
  ldd [%o0 + 72],  %l2
  ldd [%o0 + 80],  %l4
  ldd [%o0 + 88],  %l6
  ldd [%o0 + 96],  %i0
  ldd [%o0 + 104], %i2
  ldd [%o0 + 112], %i4
  ldd [%o0 + 120], %i6
  ld  [%o0 + 60],  %o7
  jmp %o7
   nop

#elif defined(__riscv)

//
// void libunwind::Registers_riscv::jumpto()
//
// On entry:
//  thread_state pointer is in a0
//
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_riscv6jumptoEv)
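// ILOAD/FLOAD and RISCV_ISIZE/RISCV_FSIZE are provided by assembly.h and
// are expected to select lw/ld and flw/fld (with matching 4- or 8-byte
// strides) based on __riscv_xlen and __riscv_flen, so the same sequence
// serves RV32 and RV64 builds.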
# if defined(__riscv_flen)
  FLOAD    f0, (RISCV_FOFFSET + RISCV_FSIZE * 0)(a0)
  FLOAD    f1, (RISCV_FOFFSET + RISCV_FSIZE * 1)(a0)
  FLOAD    f2, (RISCV_FOFFSET + RISCV_FSIZE * 2)(a0)
  FLOAD    f3, (RISCV_FOFFSET + RISCV_FSIZE * 3)(a0)
  FLOAD    f4, (RISCV_FOFFSET + RISCV_FSIZE * 4)(a0)
  FLOAD    f5, (RISCV_FOFFSET + RISCV_FSIZE * 5)(a0)
  FLOAD    f6, (RISCV_FOFFSET + RISCV_FSIZE * 6)(a0)
  FLOAD    f7, (RISCV_FOFFSET + RISCV_FSIZE * 7)(a0)
  FLOAD    f8, (RISCV_FOFFSET + RISCV_FSIZE * 8)(a0)
  FLOAD    f9, (RISCV_FOFFSET + RISCV_FSIZE * 9)(a0)
  FLOAD    f10, (RISCV_FOFFSET + RISCV_FSIZE * 10)(a0)
  FLOAD    f11, (RISCV_FOFFSET + RISCV_FSIZE * 11)(a0)
  FLOAD    f12, (RISCV_FOFFSET + RISCV_FSIZE * 12)(a0)
  FLOAD    f13, (RISCV_FOFFSET + RISCV_FSIZE * 13)(a0)
  FLOAD    f14, (RISCV_FOFFSET + RISCV_FSIZE * 14)(a0)
  FLOAD    f15, (RISCV_FOFFSET + RISCV_FSIZE * 15)(a0)
  FLOAD    f16, (RISCV_FOFFSET + RISCV_FSIZE * 16)(a0)
  FLOAD    f17, (RISCV_FOFFSET + RISCV_FSIZE * 17)(a0)
  FLOAD    f18, (RISCV_FOFFSET + RISCV_FSIZE * 18)(a0)
  FLOAD    f19, (RISCV_FOFFSET + RISCV_FSIZE * 19)(a0)
  FLOAD    f20, (RISCV_FOFFSET + RISCV_FSIZE * 20)(a0)
  FLOAD    f21, (RISCV_FOFFSET + RISCV_FSIZE * 21)(a0)
  FLOAD    f22, (RISCV_FOFFSET + RISCV_FSIZE * 22)(a0)
  FLOAD    f23, (RISCV_FOFFSET + RISCV_FSIZE * 23)(a0)
  FLOAD    f24, (RISCV_FOFFSET + RISCV_FSIZE * 24)(a0)
  FLOAD    f25, (RISCV_FOFFSET + RISCV_FSIZE * 25)(a0)
  FLOAD    f26, (RISCV_FOFFSET + RISCV_FSIZE * 26)(a0)
  FLOAD    f27, (RISCV_FOFFSET + RISCV_FSIZE * 27)(a0)
  FLOAD    f28, (RISCV_FOFFSET + RISCV_FSIZE * 28)(a0)
  FLOAD    f29, (RISCV_FOFFSET + RISCV_FSIZE * 29)(a0)
  FLOAD    f30, (RISCV_FOFFSET + RISCV_FSIZE * 30)(a0)
  FLOAD    f31, (RISCV_FOFFSET + RISCV_FSIZE * 31)(a0)
# endif

  // x0 is zero
  ILOAD    x1, (RISCV_ISIZE * 0)(a0) // restore pc into ra
  ILOAD    x2, (RISCV_ISIZE * 2)(a0)
  ILOAD    x3, (RISCV_ISIZE * 3)(a0)
  ILOAD    x4, (RISCV_ISIZE * 4)(a0)
  ILOAD    x5, (RISCV_ISIZE * 5)(a0)
  ILOAD    x6, (RISCV_ISIZE * 6)(a0)
  ILOAD    x7, (RISCV_ISIZE * 7)(a0)
  ILOAD    x8, (RISCV_ISIZE * 8)(a0)
  ILOAD    x9, (RISCV_ISIZE * 9)(a0)
  // skip a0 for now
  ILOAD    x11, (RISCV_ISIZE * 11)(a0)
  ILOAD    x12, (RISCV_ISIZE * 12)(a0)
  ILOAD    x13, (RISCV_ISIZE * 13)(a0)
  ILOAD    x14, (RISCV_ISIZE * 14)(a0)
  ILOAD    x15, (RISCV_ISIZE * 15)(a0)
  ILOAD    x16, (RISCV_ISIZE * 16)(a0)
  ILOAD    x17, (RISCV_ISIZE * 17)(a0)
  ILOAD    x18, (RISCV_ISIZE * 18)(a0)
  ILOAD    x19, (RISCV_ISIZE * 19)(a0)
  ILOAD    x20, (RISCV_ISIZE * 20)(a0)
  ILOAD    x21, (RISCV_ISIZE * 21)(a0)
  ILOAD    x22, (RISCV_ISIZE * 22)(a0)
  ILOAD    x23, (RISCV_ISIZE * 23)(a0)
  ILOAD    x24, (RISCV_ISIZE * 24)(a0)
  ILOAD    x25, (RISCV_ISIZE * 25)(a0)
  ILOAD    x26, (RISCV_ISIZE * 26)(a0)
  ILOAD    x27, (RISCV_ISIZE * 27)(a0)
  ILOAD    x28, (RISCV_ISIZE * 28)(a0)
  ILOAD    x29, (RISCV_ISIZE * 29)(a0)
  ILOAD    x30, (RISCV_ISIZE * 30)(a0)
  ILOAD    x31, (RISCV_ISIZE * 31)(a0)
  ILOAD    x10, (RISCV_ISIZE * 10)(a0)   // restore a0

  ret                       // jump to ra

#endif

#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */

NO_EXEC_STACK_DIRECTIVE
