//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "assembly.h"

    .text

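// Each __unw_getcontext variant below captures the caller's register state
// into a unw_context_t so that unwinding can start from the current frame.
// A minimal sketch of typical use through the public libunwind C API (the
// weak alias unw_getcontext is defined at the end of this file):
//
//   unw_context_t context;
//   unw_cursor_t cursor;
//   unw_getcontext(&context);           // implemented in this file
//   unw_init_local(&cursor, &context);  // begin unwinding from here
//   while (unw_step(&cursor) > 0) {
//     /* inspect frames with unw_get_reg(), unw_get_proc_name(), ... */
//   }
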
#if !defined(__USING_SJLJ_EXCEPTIONS__)

#if defined(__i386__)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#   +                       +
#   +-----------------------+
#   + thread_state pointer  +
#   +-----------------------+
#   + return address        +
#   +-----------------------+   <-- SP
#   +                       +
#
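# For orientation, the stores below imply this layout within the saved
# context (offsets read off the code, not a separate specification):
#   0: eax   4: ebx   8: ecx  12: edx  16: edi  20: esi
#  24: ebp  28: esp (at call site)    40: eip (return address)
# The ss, eflags, cs, ds, es, fs, and gs slots are left untouched.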
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)

  _LIBUNWIND_CET_ENDBR
  push  %eax
  movl  8(%esp), %eax
  movl  %ebx,  4(%eax)
  movl  %ecx,  8(%eax)
  movl  %edx, 12(%eax)
  movl  %edi, 16(%eax)
  movl  %esi, 20(%eax)
  movl  %ebp, 24(%eax)
  movl  %esp, %edx
  addl  $8, %edx
  movl  %edx, 28(%eax)  # store what sp was at call site as esp
  # skip ss
  # skip eflags
  movl  4(%esp), %edx
  movl  %edx, 40(%eax)  # store return address as eip
  # skip cs
  # skip ds
  # skip es
  # skip fs
  # skip gs
  movl  (%esp), %edx
  movl  %edx, (%eax)  # store original eax
  popl  %eax
  xorl  %eax, %eax    # return UNW_ESUCCESS
  ret

#elif defined(__x86_64__)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in rdi
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
#if defined(_WIN64)
#define PTR %rcx
#define TMP %rdx
#else
#define PTR %rdi
#define TMP %rsi
#endif
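# PTR/TMP paper over the calling-convention difference: the Windows x64 ABI
# passes the first integer argument in rcx (with rdx free as scratch), while
# the SysV AMD64 ABI passes it in rdi (with rsi as scratch).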

  _LIBUNWIND_CET_ENDBR
  movq  %rax,   (PTR)
  movq  %rbx,  8(PTR)
  movq  %rcx, 16(PTR)
  movq  %rdx, 24(PTR)
  movq  %rdi, 32(PTR)
  movq  %rsi, 40(PTR)
  movq  %rbp, 48(PTR)
  movq  %rsp, 56(PTR)
  addq  $8,   56(PTR)
  movq  %r8,  64(PTR)
  movq  %r9,  72(PTR)
  movq  %r10, 80(PTR)
  movq  %r11, 88(PTR)
  movq  %r12, 96(PTR)
  movq  %r13,104(PTR)
  movq  %r14,112(PTR)
  movq  %r15,120(PTR)
  movq  (%rsp),TMP
  movq  TMP,128(PTR) # store return address as rip
  # skip rflags
  # skip cs
  # skip fs
  # skip gs

#if defined(_WIN64)
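  # The Windows x64 ABI treats xmm6-xmm15 as nonvolatile, so the full xmm
  # register file is captured here; SysV has no callee-saved xmm registers,
  # so the non-Windows path skips them.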
  movdqu %xmm0,176(PTR)
  movdqu %xmm1,192(PTR)
  movdqu %xmm2,208(PTR)
  movdqu %xmm3,224(PTR)
  movdqu %xmm4,240(PTR)
  movdqu %xmm5,256(PTR)
  movdqu %xmm6,272(PTR)
  movdqu %xmm7,288(PTR)
  movdqu %xmm8,304(PTR)
  movdqu %xmm9,320(PTR)
  movdqu %xmm10,336(PTR)
  movdqu %xmm11,352(PTR)
  movdqu %xmm12,368(PTR)
  movdqu %xmm13,384(PTR)
  movdqu %xmm14,400(PTR)
  movdqu %xmm15,416(PTR)
#endif
  xorl  %eax, %eax    # return UNW_ESUCCESS
  ret

#elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in a0 ($4)
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  .set push
  .set noat
  .set noreorder
  .set nomacro
  sw    $1, (4 * 1)($4)
  sw    $2, (4 * 2)($4)
  sw    $3, (4 * 3)($4)
  sw    $4, (4 * 4)($4)
  sw    $5, (4 * 5)($4)
  sw    $6, (4 * 6)($4)
  sw    $7, (4 * 7)($4)
  sw    $8, (4 * 8)($4)
  sw    $9, (4 * 9)($4)
  sw    $10, (4 * 10)($4)
  sw    $11, (4 * 11)($4)
  sw    $12, (4 * 12)($4)
  sw    $13, (4 * 13)($4)
  sw    $14, (4 * 14)($4)
  sw    $15, (4 * 15)($4)
  sw    $16, (4 * 16)($4)
  sw    $17, (4 * 17)($4)
  sw    $18, (4 * 18)($4)
  sw    $19, (4 * 19)($4)
  sw    $20, (4 * 20)($4)
  sw    $21, (4 * 21)($4)
  sw    $22, (4 * 22)($4)
  sw    $23, (4 * 23)($4)
  sw    $24, (4 * 24)($4)
  sw    $25, (4 * 25)($4)
  sw    $26, (4 * 26)($4)
  sw    $27, (4 * 27)($4)
  sw    $28, (4 * 28)($4)
  sw    $29, (4 * 29)($4)
  sw    $30, (4 * 30)($4)
  sw    $31, (4 * 31)($4)
  # Store return address to pc
  sw    $31, (4 * 32)($4)
  # hi and lo
  mfhi  $8
  sw    $8,  (4 * 33)($4)
  mflo  $8
  sw    $8,  (4 * 34)($4)
#ifdef __mips_hard_float
#if __mips_fpr != 64
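  # With 32-bit FPRs (FR=0), an even/odd register pair holds one double, so
  # only the even-numbered registers are stored; each sdc1 writes the full
  # 64-bit pair into that register's slot.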
  sdc1  $f0, (4 * 36 + 8 * 0)($4)
  sdc1  $f2, (4 * 36 + 8 * 2)($4)
  sdc1  $f4, (4 * 36 + 8 * 4)($4)
  sdc1  $f6, (4 * 36 + 8 * 6)($4)
  sdc1  $f8, (4 * 36 + 8 * 8)($4)
  sdc1  $f10, (4 * 36 + 8 * 10)($4)
  sdc1  $f12, (4 * 36 + 8 * 12)($4)
  sdc1  $f14, (4 * 36 + 8 * 14)($4)
  sdc1  $f16, (4 * 36 + 8 * 16)($4)
  sdc1  $f18, (4 * 36 + 8 * 18)($4)
  sdc1  $f20, (4 * 36 + 8 * 20)($4)
  sdc1  $f22, (4 * 36 + 8 * 22)($4)
  sdc1  $f24, (4 * 36 + 8 * 24)($4)
  sdc1  $f26, (4 * 36 + 8 * 26)($4)
  sdc1  $f28, (4 * 36 + 8 * 28)($4)
  sdc1  $f30, (4 * 36 + 8 * 30)($4)
#else
  sdc1  $f0, (4 * 36 + 8 * 0)($4)
  sdc1  $f1, (4 * 36 + 8 * 1)($4)
  sdc1  $f2, (4 * 36 + 8 * 2)($4)
  sdc1  $f3, (4 * 36 + 8 * 3)($4)
  sdc1  $f4, (4 * 36 + 8 * 4)($4)
  sdc1  $f5, (4 * 36 + 8 * 5)($4)
  sdc1  $f6, (4 * 36 + 8 * 6)($4)
  sdc1  $f7, (4 * 36 + 8 * 7)($4)
  sdc1  $f8, (4 * 36 + 8 * 8)($4)
  sdc1  $f9, (4 * 36 + 8 * 9)($4)
  sdc1  $f10, (4 * 36 + 8 * 10)($4)
  sdc1  $f11, (4 * 36 + 8 * 11)($4)
  sdc1  $f12, (4 * 36 + 8 * 12)($4)
  sdc1  $f13, (4 * 36 + 8 * 13)($4)
  sdc1  $f14, (4 * 36 + 8 * 14)($4)
  sdc1  $f15, (4 * 36 + 8 * 15)($4)
  sdc1  $f16, (4 * 36 + 8 * 16)($4)
  sdc1  $f17, (4 * 36 + 8 * 17)($4)
  sdc1  $f18, (4 * 36 + 8 * 18)($4)
  sdc1  $f19, (4 * 36 + 8 * 19)($4)
  sdc1  $f20, (4 * 36 + 8 * 20)($4)
  sdc1  $f21, (4 * 36 + 8 * 21)($4)
  sdc1  $f22, (4 * 36 + 8 * 22)($4)
  sdc1  $f23, (4 * 36 + 8 * 23)($4)
  sdc1  $f24, (4 * 36 + 8 * 24)($4)
  sdc1  $f25, (4 * 36 + 8 * 25)($4)
  sdc1  $f26, (4 * 36 + 8 * 26)($4)
  sdc1  $f27, (4 * 36 + 8 * 27)($4)
  sdc1  $f28, (4 * 36 + 8 * 28)($4)
  sdc1  $f29, (4 * 36 + 8 * 29)($4)
  sdc1  $f30, (4 * 36 + 8 * 30)($4)
  sdc1  $f31, (4 * 36 + 8 * 31)($4)
#endif
#endif
  jr    $31
  # return UNW_ESUCCESS
  or    $2, $0, $0
  .set pop

#elif defined(__mips64)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in a0 ($4)
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  .set push
  .set noat
  .set noreorder
  .set nomacro
  sd    $1, (8 * 1)($4)
  sd    $2, (8 * 2)($4)
  sd    $3, (8 * 3)($4)
  sd    $4, (8 * 4)($4)
  sd    $5, (8 * 5)($4)
  sd    $6, (8 * 6)($4)
  sd    $7, (8 * 7)($4)
  sd    $8, (8 * 8)($4)
  sd    $9, (8 * 9)($4)
  sd    $10, (8 * 10)($4)
  sd    $11, (8 * 11)($4)
  sd    $12, (8 * 12)($4)
  sd    $13, (8 * 13)($4)
  sd    $14, (8 * 14)($4)
  sd    $15, (8 * 15)($4)
  sd    $16, (8 * 16)($4)
  sd    $17, (8 * 17)($4)
  sd    $18, (8 * 18)($4)
  sd    $19, (8 * 19)($4)
  sd    $20, (8 * 20)($4)
  sd    $21, (8 * 21)($4)
  sd    $22, (8 * 22)($4)
  sd    $23, (8 * 23)($4)
  sd    $24, (8 * 24)($4)
  sd    $25, (8 * 25)($4)
  sd    $26, (8 * 26)($4)
  sd    $27, (8 * 27)($4)
  sd    $28, (8 * 28)($4)
  sd    $29, (8 * 29)($4)
  sd    $30, (8 * 30)($4)
  sd    $31, (8 * 31)($4)
  # Store return address to pc
  sd    $31, (8 * 32)($4)
  # hi and lo
  mfhi  $8
  sd    $8,  (8 * 33)($4)
  mflo  $8
  sd    $8,  (8 * 34)($4)
#ifdef __mips_hard_float
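  # Under the 64-bit ABI every FPR is a full 64-bit register, so all 32 are
  # stored, in the slots following pc/hi/lo.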
  sdc1  $f0, (8 * 35)($4)
  sdc1  $f1, (8 * 36)($4)
  sdc1  $f2, (8 * 37)($4)
  sdc1  $f3, (8 * 38)($4)
  sdc1  $f4, (8 * 39)($4)
  sdc1  $f5, (8 * 40)($4)
  sdc1  $f6, (8 * 41)($4)
  sdc1  $f7, (8 * 42)($4)
  sdc1  $f8, (8 * 43)($4)
  sdc1  $f9, (8 * 44)($4)
  sdc1  $f10, (8 * 45)($4)
  sdc1  $f11, (8 * 46)($4)
  sdc1  $f12, (8 * 47)($4)
  sdc1  $f13, (8 * 48)($4)
  sdc1  $f14, (8 * 49)($4)
  sdc1  $f15, (8 * 50)($4)
  sdc1  $f16, (8 * 51)($4)
  sdc1  $f17, (8 * 52)($4)
  sdc1  $f18, (8 * 53)($4)
  sdc1  $f19, (8 * 54)($4)
  sdc1  $f20, (8 * 55)($4)
  sdc1  $f21, (8 * 56)($4)
  sdc1  $f22, (8 * 57)($4)
  sdc1  $f23, (8 * 58)($4)
  sdc1  $f24, (8 * 59)($4)
  sdc1  $f25, (8 * 60)($4)
  sdc1  $f26, (8 * 61)($4)
  sdc1  $f27, (8 * 62)($4)
  sdc1  $f28, (8 * 63)($4)
  sdc1  $f29, (8 * 64)($4)
  sdc1  $f30, (8 * 65)($4)
  sdc1  $f31, (8 * 66)($4)
#endif
  jr    $31
  # return UNW_ESUCCESS
  or    $2, $0, $0
  .set pop

#elif defined(__mips__)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# Just trap for the time being.
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  teq $0, $0

#elif defined(__powerpc64__)

//
// extern int __unw_getcontext(unw_context_t* thread_state)
//
// On entry:
//  thread_state pointer is in r3
//
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)

// store register (GPR)
#define PPC64_STR(n) \
  std   n, (8 * (n + 2))(3)
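
// For example, PPC64_STR(14) expands to
//   std 14, (8 * 16)(3)
// placing GPR14 128 bytes into the context; the "+ 2" skips the two
// leading doubleword slots (srr0/srr1) that precede the GPR array.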

  // save GPRs
  PPC64_STR(0)
  mflr  0
  std   0, PPC64_OFFS_SRR0(3) // store lr as srr0
  PPC64_STR(1)
  PPC64_STR(2)
  PPC64_STR(3)
  PPC64_STR(4)
  PPC64_STR(5)
  PPC64_STR(6)
  PPC64_STR(7)
  PPC64_STR(8)
  PPC64_STR(9)
  PPC64_STR(10)
  PPC64_STR(11)
  PPC64_STR(12)
  PPC64_STR(13)
  PPC64_STR(14)
  PPC64_STR(15)
  PPC64_STR(16)
  PPC64_STR(17)
  PPC64_STR(18)
  PPC64_STR(19)
  PPC64_STR(20)
  PPC64_STR(21)
  PPC64_STR(22)
  PPC64_STR(23)
  PPC64_STR(24)
  PPC64_STR(25)
  PPC64_STR(26)
  PPC64_STR(27)
  PPC64_STR(28)
  PPC64_STR(29)
  PPC64_STR(30)
  PPC64_STR(31)

  mfcr  0
  std   0,  PPC64_OFFS_CR(3)
  mfxer 0
  std   0,  PPC64_OFFS_XER(3)
  mflr  0
  std   0,  PPC64_OFFS_LR(3)
  mfctr 0
  std   0,  PPC64_OFFS_CTR(3)
  mfvrsave    0
  std   0,  PPC64_OFFS_VRSAVE(3)

#if defined(__VSX__)
  // save VS registers
  // (note that this also saves floating point registers and V registers,
  // because part of VS is mapped to these registers)

  addi  4, 3, PPC64_OFFS_FP

// store VS register
#define PPC64_STVS(n)      \
  stxvd2x n, 0, 4         ;\
  addi    4, 4, 16
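
// Each PPC64_STVS(n) stores the 16-byte vector-scalar register vsN at the
// address in r4 (stxvd2x with a zero index register) and advances r4 by 16,
// so the 64 invocations below lay out vs0..vs63 contiguously starting at
// PPC64_OFFS_FP.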

  PPC64_STVS(0)
  PPC64_STVS(1)
  PPC64_STVS(2)
  PPC64_STVS(3)
  PPC64_STVS(4)
  PPC64_STVS(5)
  PPC64_STVS(6)
  PPC64_STVS(7)
  PPC64_STVS(8)
  PPC64_STVS(9)
  PPC64_STVS(10)
  PPC64_STVS(11)
  PPC64_STVS(12)
  PPC64_STVS(13)
  PPC64_STVS(14)
  PPC64_STVS(15)
  PPC64_STVS(16)
  PPC64_STVS(17)
  PPC64_STVS(18)
  PPC64_STVS(19)
  PPC64_STVS(20)
  PPC64_STVS(21)
  PPC64_STVS(22)
  PPC64_STVS(23)
  PPC64_STVS(24)
  PPC64_STVS(25)
  PPC64_STVS(26)
  PPC64_STVS(27)
  PPC64_STVS(28)
  PPC64_STVS(29)
  PPC64_STVS(30)
  PPC64_STVS(31)
  PPC64_STVS(32)
  PPC64_STVS(33)
  PPC64_STVS(34)
  PPC64_STVS(35)
  PPC64_STVS(36)
  PPC64_STVS(37)
  PPC64_STVS(38)
  PPC64_STVS(39)
  PPC64_STVS(40)
  PPC64_STVS(41)
  PPC64_STVS(42)
  PPC64_STVS(43)
  PPC64_STVS(44)
  PPC64_STVS(45)
  PPC64_STVS(46)
  PPC64_STVS(47)
  PPC64_STVS(48)
  PPC64_STVS(49)
  PPC64_STVS(50)
  PPC64_STVS(51)
  PPC64_STVS(52)
  PPC64_STVS(53)
  PPC64_STVS(54)
  PPC64_STVS(55)
  PPC64_STVS(56)
  PPC64_STVS(57)
  PPC64_STVS(58)
  PPC64_STVS(59)
  PPC64_STVS(60)
  PPC64_STVS(61)
  PPC64_STVS(62)
  PPC64_STVS(63)

#else

// store FP register
#define PPC64_STF(n) \
  stfd  n, (PPC64_OFFS_FP + n * 16)(3)

  // save float registers
  PPC64_STF(0)
  PPC64_STF(1)
  PPC64_STF(2)
  PPC64_STF(3)
  PPC64_STF(4)
  PPC64_STF(5)
  PPC64_STF(6)
  PPC64_STF(7)
  PPC64_STF(8)
  PPC64_STF(9)
  PPC64_STF(10)
  PPC64_STF(11)
  PPC64_STF(12)
  PPC64_STF(13)
  PPC64_STF(14)
  PPC64_STF(15)
  PPC64_STF(16)
  PPC64_STF(17)
  PPC64_STF(18)
  PPC64_STF(19)
  PPC64_STF(20)
  PPC64_STF(21)
  PPC64_STF(22)
  PPC64_STF(23)
  PPC64_STF(24)
  PPC64_STF(25)
  PPC64_STF(26)
  PPC64_STF(27)
  PPC64_STF(28)
  PPC64_STF(29)
  PPC64_STF(30)
  PPC64_STF(31)

#if defined(__ALTIVEC__)
  // save vector registers

  // Use 16-bytes below the stack pointer as an
  // aligned buffer to save each vector register.
  // Note that the stack pointer is always 16-byte aligned.
  subi  4, 1, 16

#define PPC64_STV_UNALIGNED(n)             \
  stvx  n, 0, 4                           ;\
  ld    5, 0(4)                           ;\
  std   5, (PPC64_OFFS_V + n * 16)(3)     ;\
  ld    5, 8(4)                           ;\
  std   5, (PPC64_OFFS_V + n * 16 + 8)(3)
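
// stvx stores only to 16-byte-aligned addresses (the low address bits are
// ignored), so each vector is bounced through the aligned scratch slot at
// r4 and then copied into the possibly-unaligned context field as two
// doublewords through r5.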

  PPC64_STV_UNALIGNED(0)
  PPC64_STV_UNALIGNED(1)
  PPC64_STV_UNALIGNED(2)
  PPC64_STV_UNALIGNED(3)
  PPC64_STV_UNALIGNED(4)
  PPC64_STV_UNALIGNED(5)
  PPC64_STV_UNALIGNED(6)
  PPC64_STV_UNALIGNED(7)
  PPC64_STV_UNALIGNED(8)
  PPC64_STV_UNALIGNED(9)
  PPC64_STV_UNALIGNED(10)
  PPC64_STV_UNALIGNED(11)
  PPC64_STV_UNALIGNED(12)
  PPC64_STV_UNALIGNED(13)
  PPC64_STV_UNALIGNED(14)
  PPC64_STV_UNALIGNED(15)
  PPC64_STV_UNALIGNED(16)
  PPC64_STV_UNALIGNED(17)
  PPC64_STV_UNALIGNED(18)
  PPC64_STV_UNALIGNED(19)
  PPC64_STV_UNALIGNED(20)
  PPC64_STV_UNALIGNED(21)
  PPC64_STV_UNALIGNED(22)
  PPC64_STV_UNALIGNED(23)
  PPC64_STV_UNALIGNED(24)
  PPC64_STV_UNALIGNED(25)
  PPC64_STV_UNALIGNED(26)
  PPC64_STV_UNALIGNED(27)
  PPC64_STV_UNALIGNED(28)
  PPC64_STV_UNALIGNED(29)
  PPC64_STV_UNALIGNED(30)
  PPC64_STV_UNALIGNED(31)

#endif
#endif

  li    3,  0   // return UNW_ESUCCESS
  blr


#elif defined(__powerpc__)

//
// extern int __unw_getcontext(unw_context_t* thread_state)
//
// On entry:
//  thread_state pointer is in r3
//
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  stw     0,   8(3)
  mflr    0
  stw     0,   0(3) // store lr as srr0
  stw     1,  12(3)
  stw     2,  16(3)
  stw     3,  20(3)
  stw     4,  24(3)
  stw     5,  28(3)
  stw     6,  32(3)
  stw     7,  36(3)
  stw     8,  40(3)
  stw     9,  44(3)
  stw     10, 48(3)
  stw     11, 52(3)
  stw     12, 56(3)
  stw     13, 60(3)
  stw     14, 64(3)
  stw     15, 68(3)
  stw     16, 72(3)
  stw     17, 76(3)
  stw     18, 80(3)
  stw     19, 84(3)
  stw     20, 88(3)
  stw     21, 92(3)
  stw     22, 96(3)
  stw     23,100(3)
  stw     24,104(3)
  stw     25,108(3)
  stw     26,112(3)
  stw     27,116(3)
  stw     28,120(3)
  stw     29,124(3)
  stw     30,128(3)
  stw     31,132(3)

  // save VRSave register
  mfspr   0, 256
  stw     0, 156(3)
  // save CR registers
  mfcr    0
  stw     0, 136(3)
  // save CTR register
  mfctr   0
  stw     0, 148(3)

#if !defined(__NO_FPRS__)
  // save float registers
  stfd    0, 160(3)
  stfd    1, 168(3)
  stfd    2, 176(3)
  stfd    3, 184(3)
  stfd    4, 192(3)
  stfd    5, 200(3)
  stfd    6, 208(3)
  stfd    7, 216(3)
  stfd    8, 224(3)
  stfd    9, 232(3)
  stfd    10,240(3)
  stfd    11,248(3)
  stfd    12,256(3)
  stfd    13,264(3)
  stfd    14,272(3)
  stfd    15,280(3)
  stfd    16,288(3)
  stfd    17,296(3)
  stfd    18,304(3)
  stfd    19,312(3)
  stfd    20,320(3)
  stfd    21,328(3)
  stfd    22,336(3)
  stfd    23,344(3)
  stfd    24,352(3)
  stfd    25,360(3)
  stfd    26,368(3)
  stfd    27,376(3)
  stfd    28,384(3)
  stfd    29,392(3)
  stfd    30,400(3)
  stfd    31,408(3)
#endif

#if defined(__ALTIVEC__)
  // save vector registers

  subi    4, 1, 16
  rlwinm  4, 4, 0, 0, 27  // clear the low 4 bits (16-byte align)
  // r4 is now a 16-byte aligned pointer into the red zone

#define SAVE_VECTOR_UNALIGNED(_vec, _offset) \
  stvx    _vec, 0, 4               SEPARATOR \
  lwz     5, 0(4)                  SEPARATOR \
  stw     5, _offset(3)            SEPARATOR \
  lwz     5, 4(4)                  SEPARATOR \
  stw     5, _offset+4(3)          SEPARATOR \
  lwz     5, 8(4)                  SEPARATOR \
  stw     5, _offset+8(3)          SEPARATOR \
  lwz     5, 12(4)                 SEPARATOR \
  stw     5, _offset+12(3)
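
  // Same bounce-buffer technique as the 64-bit path: stvx writes each
  // vector to the aligned scratch at r4, then its four words are copied
  // into the context at _offset; v0 lands at byte offset 424, with one
  // 16-byte slot per register after that.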

  SAVE_VECTOR_UNALIGNED( 0, 424+0x000)
  SAVE_VECTOR_UNALIGNED( 1, 424+0x010)
  SAVE_VECTOR_UNALIGNED( 2, 424+0x020)
  SAVE_VECTOR_UNALIGNED( 3, 424+0x030)
  SAVE_VECTOR_UNALIGNED( 4, 424+0x040)
  SAVE_VECTOR_UNALIGNED( 5, 424+0x050)
  SAVE_VECTOR_UNALIGNED( 6, 424+0x060)
  SAVE_VECTOR_UNALIGNED( 7, 424+0x070)
  SAVE_VECTOR_UNALIGNED( 8, 424+0x080)
  SAVE_VECTOR_UNALIGNED( 9, 424+0x090)
  SAVE_VECTOR_UNALIGNED(10, 424+0x0A0)
  SAVE_VECTOR_UNALIGNED(11, 424+0x0B0)
  SAVE_VECTOR_UNALIGNED(12, 424+0x0C0)
  SAVE_VECTOR_UNALIGNED(13, 424+0x0D0)
  SAVE_VECTOR_UNALIGNED(14, 424+0x0E0)
  SAVE_VECTOR_UNALIGNED(15, 424+0x0F0)
  SAVE_VECTOR_UNALIGNED(16, 424+0x100)
  SAVE_VECTOR_UNALIGNED(17, 424+0x110)
  SAVE_VECTOR_UNALIGNED(18, 424+0x120)
  SAVE_VECTOR_UNALIGNED(19, 424+0x130)
  SAVE_VECTOR_UNALIGNED(20, 424+0x140)
  SAVE_VECTOR_UNALIGNED(21, 424+0x150)
  SAVE_VECTOR_UNALIGNED(22, 424+0x160)
  SAVE_VECTOR_UNALIGNED(23, 424+0x170)
  SAVE_VECTOR_UNALIGNED(24, 424+0x180)
  SAVE_VECTOR_UNALIGNED(25, 424+0x190)
  SAVE_VECTOR_UNALIGNED(26, 424+0x1A0)
  SAVE_VECTOR_UNALIGNED(27, 424+0x1B0)
  SAVE_VECTOR_UNALIGNED(28, 424+0x1C0)
  SAVE_VECTOR_UNALIGNED(29, 424+0x1D0)
  SAVE_VECTOR_UNALIGNED(30, 424+0x1E0)
  SAVE_VECTOR_UNALIGNED(31, 424+0x1F0)
#endif

  li      3, 0  // return UNW_ESUCCESS
  blr


#elif defined(__aarch64__)

//
// extern int __unw_getcontext(unw_context_t* thread_state)
//
// On entry:
//  thread_state pointer is in x0
//
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
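  // Layout written below: x0-x30 in 8-byte slots from 0x000, sp at 0x0F8,
  // the return address (stored as pc) at 0x100, then d0-d31 from 0x110.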
  stp    x0, x1,  [x0, #0x000]
  stp    x2, x3,  [x0, #0x010]
  stp    x4, x5,  [x0, #0x020]
  stp    x6, x7,  [x0, #0x030]
  stp    x8, x9,  [x0, #0x040]
  stp    x10,x11, [x0, #0x050]
  stp    x12,x13, [x0, #0x060]
  stp    x14,x15, [x0, #0x070]
  stp    x16,x17, [x0, #0x080]
  stp    x18,x19, [x0, #0x090]
  stp    x20,x21, [x0, #0x0A0]
  stp    x22,x23, [x0, #0x0B0]
  stp    x24,x25, [x0, #0x0C0]
  stp    x26,x27, [x0, #0x0D0]
  stp    x28,x29, [x0, #0x0E0]
  str    x30,     [x0, #0x0F0]
  mov    x1,sp
  str    x1,      [x0, #0x0F8]
  str    x30,     [x0, #0x100]    // store return address as pc
  // skip cpsr
  stp    d0, d1,  [x0, #0x110]
  stp    d2, d3,  [x0, #0x120]
  stp    d4, d5,  [x0, #0x130]
  stp    d6, d7,  [x0, #0x140]
  stp    d8, d9,  [x0, #0x150]
  stp    d10,d11, [x0, #0x160]
  stp    d12,d13, [x0, #0x170]
  stp    d14,d15, [x0, #0x180]
  stp    d16,d17, [x0, #0x190]
  stp    d18,d19, [x0, #0x1A0]
  stp    d20,d21, [x0, #0x1B0]
  stp    d22,d23, [x0, #0x1C0]
  stp    d24,d25, [x0, #0x1D0]
  stp    d26,d27, [x0, #0x1E0]
  stp    d28,d29, [x0, #0x1F0]
  str    d30,     [x0, #0x200]
  str    d31,     [x0, #0x208]
  mov    x0, #0                   // return UNW_ESUCCESS
  ret

#elif defined(__arm__) && !defined(__APPLE__)

#if !defined(__ARM_ARCH_ISA_ARM)
#if (__ARM_ARCH_ISA_THUMB == 2)
  .syntax unified
#endif
  .thumb
#endif

@
@ extern int __unw_getcontext(unw_context_t* thread_state)
@
@ On entry:
@  thread_state pointer is in r0
@
@ Per EHABI #4.7 this only saves the core integer registers.
@ EHABI #7.4.5 notes that in general all VRS registers should be restored;
@ however, this is very hard to do for VFP registers because the library
@ does not know how many registers the architecture implements.
@ Instead, VFP registers are demand-saved by logic external to
@ __unw_getcontext.
@
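@ A sketch of the demand-save idea: when unwind info for some frame refers
@ to VFP or iwMMX registers, the unwinder calls one of the
@ Registers_arm::save* helpers defined later in this file to capture them
@ at that point, instead of saving them unconditionally here.
@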
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
#if !defined(__ARM_ARCH_ISA_ARM) && __ARM_ARCH_ISA_THUMB == 1
  stm r0!, {r0-r7}
  mov r1, r8
  mov r2, r9
  mov r3, r10
  stm r0!, {r1-r3}
  mov r1, r11
  mov r2, sp
  mov r3, lr
  str r1, [r0, #0]   @ r11
  @ r12 does not need storing, it is the intra-procedure-call scratch register
  str r2, [r0, #8]   @ sp
  str r3, [r0, #12]  @ lr
  str r3, [r0, #16]  @ store return address as pc
  @ T1 does not have a non-cpsr-clobbering register-zeroing instruction.
  @ It is safe to use here though because we are about to return, and cpsr is
  @ not expected to be preserved.
  movs r0, #0        @ return UNW_ESUCCESS
#else
  @ 32-bit Thumb-2 restrictions for stm:
  @ . the sp (r13) cannot be in the list
  @ . the pc (r15) cannot be in the list in an STM instruction
  stm r0, {r0-r12}
  str sp, [r0, #52]
  str lr, [r0, #56]
  str lr, [r0, #60]  @ store return address as pc
  mov r0, #0         @ return UNW_ESUCCESS
#endif
  JMP(lr)

@
@ static void libunwind::Registers_arm::saveVFPWithFSTMD(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMDEPv)
  vstmia r0, {d0-d15}
  JMP(lr)

@
@ static void libunwind::Registers_arm::saveVFPWithFSTMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMXEPv)
  vstmia r0, {d0-d15} @ fstmiax is deprecated in ARMv7+ and now behaves like vstmia
  JMP(lr)

@
@ static void libunwind::Registers_arm::saveVFPv3(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm9saveVFPv3EPv)
  @ VFP and iwMMX instructions are only available when compiling with the
  @ flags that enable them. We do not want to build the library with those
  @ flags (because the compiler could then generate such instructions
  @ elsewhere), and this code is only reached when the personality routine
  @ actually needs these registers. Reaching it therefore implies the
  @ registers really exist on the target, so it is safe to execute. Where
  @ needed, the instructions are written via the corresponding coprocessor
  @ mnemonics (see the iwMMX helpers below).
  vstmia r0, {d16-d31}
  JMP(lr)

#if defined(_LIBUNWIND_ARM_WMMX)

@
@ static void libunwind::Registers_arm::saveiWMMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm9saveiWMMXEPv)
  stcl p1, cr0, [r0], #8  @ wstrd wR0, [r0], #8
  stcl p1, cr1, [r0], #8  @ wstrd wR1, [r0], #8
  stcl p1, cr2, [r0], #8  @ wstrd wR2, [r0], #8
  stcl p1, cr3, [r0], #8  @ wstrd wR3, [r0], #8
  stcl p1, cr4, [r0], #8  @ wstrd wR4, [r0], #8
  stcl p1, cr5, [r0], #8  @ wstrd wR5, [r0], #8
  stcl p1, cr6, [r0], #8  @ wstrd wR6, [r0], #8
  stcl p1, cr7, [r0], #8  @ wstrd wR7, [r0], #8
  stcl p1, cr8, [r0], #8  @ wstrd wR8, [r0], #8
  stcl p1, cr9, [r0], #8  @ wstrd wR9, [r0], #8
  stcl p1, cr10, [r0], #8  @ wstrd wR10, [r0], #8
  stcl p1, cr11, [r0], #8  @ wstrd wR11, [r0], #8
  stcl p1, cr12, [r0], #8  @ wstrd wR12, [r0], #8
  stcl p1, cr13, [r0], #8  @ wstrd wR13, [r0], #8
  stcl p1, cr14, [r0], #8  @ wstrd wR14, [r0], #8
  stcl p1, cr15, [r0], #8  @ wstrd wR15, [r0], #8
  JMP(lr)

@
@ static void libunwind::Registers_arm::saveiWMMXControl(unw_uint32_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveiWMMXControlEPj)
  stc2 p1, cr8, [r0], #4  @ wstrw wCGR0, [r0], #4
  stc2 p1, cr9, [r0], #4  @ wstrw wCGR1, [r0], #4
  stc2 p1, cr10, [r0], #4  @ wstrw wCGR2, [r0], #4
  stc2 p1, cr11, [r0], #4  @ wstrw wCGR3, [r0], #4
  JMP(lr)

#endif

#elif defined(__or1k__)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in r3
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  l.sw       0(r3), r0
  l.sw       4(r3), r1
  l.sw       8(r3), r2
  l.sw      12(r3), r3
  l.sw      16(r3), r4
  l.sw      20(r3), r5
  l.sw      24(r3), r6
  l.sw      28(r3), r7
  l.sw      32(r3), r8
  l.sw      36(r3), r9
  l.sw      40(r3), r10
  l.sw      44(r3), r11
  l.sw      48(r3), r12
  l.sw      52(r3), r13
  l.sw      56(r3), r14
  l.sw      60(r3), r15
  l.sw      64(r3), r16
  l.sw      68(r3), r17
  l.sw      72(r3), r18
  l.sw      76(r3), r19
  l.sw      80(r3), r20
  l.sw      84(r3), r21
  l.sw      88(r3), r22
  l.sw      92(r3), r23
  l.sw      96(r3), r24
  l.sw     100(r3), r25
  l.sw     104(r3), r26
  l.sw     108(r3), r27
  l.sw     112(r3), r28
  l.sw     116(r3), r29
  l.sw     120(r3), r30
  l.sw     124(r3), r31
  # store ra to pc
  l.sw     128(r3), r9
  # zero epcr
  l.sw     132(r3), r0

#elif defined(__hexagon__)
#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in r0
#
#define OFFSET(offset) (offset/4)
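// The stores below place each GPR at byte offset 4 * regnum (r8 at #32
// through r31 at #124), followed by the predicate registers (c4) at #128
// and the return address at #132.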
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  memw(r0+#32) = r8
  memw(r0+#36) = r9
  memw(r0+#40) = r10
  memw(r0+#44) = r11

  memw(r0+#48) = r12
  memw(r0+#52) = r13
  memw(r0+#56) = r14
  memw(r0+#60) = r15

  memw(r0+#64) = r16
  memw(r0+#68) = r17
  memw(r0+#72) = r18
  memw(r0+#76) = r19

  memw(r0+#80) = r20
  memw(r0+#84) = r21
  memw(r0+#88) = r22
  memw(r0+#92) = r23

  memw(r0+#96) = r24
  memw(r0+#100) = r25
  memw(r0+#104) = r26
  memw(r0+#108) = r27

  memw(r0+#112) = r28
  memw(r0+#116) = r29
  memw(r0+#120) = r30
  memw(r0+#124) = r31
  r1 = c4   // Predicate register
  memw(r0+#128) = r1
  r1 = memw(r30)           // *FP == Saved FP
  r1 = r31
  memw(r0+#132) = r1

  jumpr r31

#elif defined(__sparc__)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in o0
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
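# "ta 3" below is the flush-windows software trap: it makes the kernel
# spill all register windows to the stack, so the callers' locals and ins
# are in memory before they are read during unwinding. %o7 holds the
# address of the call instruction, so 8 is added to turn it into the
# return address before it is stored (and jumped through) below.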
  ta 3
  add %o7, 8, %o7
  std %g0, [%o0 +   0]
  std %g2, [%o0 +   8]
  std %g4, [%o0 +  16]
  std %g6, [%o0 +  24]
  std %o0, [%o0 +  32]
  std %o2, [%o0 +  40]
  std %o4, [%o0 +  48]
  std %o6, [%o0 +  56]
  std %l0, [%o0 +  64]
  std %l2, [%o0 +  72]
  std %l4, [%o0 +  80]
  std %l6, [%o0 +  88]
  std %i0, [%o0 +  96]
  std %i2, [%o0 + 104]
  std %i4, [%o0 + 112]
  std %i6, [%o0 + 120]
  jmp %o7
   clr %o0                   // return UNW_ESUCCESS

#elif defined(__riscv)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in a0
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
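  # ISTORE/FSTORE and the RISCV_* constants are defined in assembly.h and
  # sized for the ABI: on RV64 ISTORE is sd with RISCV_ISIZE == 8, on RV32
  # it is sw with RISCV_ISIZE == 4; FSTORE/RISCV_FSIZE follow __riscv_flen.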
  ISTORE    x1, (RISCV_ISIZE * 0)(a0) // store ra as pc
  ISTORE    x1, (RISCV_ISIZE * 1)(a0)
  ISTORE    x2, (RISCV_ISIZE * 2)(a0)
  ISTORE    x3, (RISCV_ISIZE * 3)(a0)
  ISTORE    x4, (RISCV_ISIZE * 4)(a0)
  ISTORE    x5, (RISCV_ISIZE * 5)(a0)
  ISTORE    x6, (RISCV_ISIZE * 6)(a0)
  ISTORE    x7, (RISCV_ISIZE * 7)(a0)
  ISTORE    x8, (RISCV_ISIZE * 8)(a0)
  ISTORE    x9, (RISCV_ISIZE * 9)(a0)
  ISTORE    x10, (RISCV_ISIZE * 10)(a0)
  ISTORE    x11, (RISCV_ISIZE * 11)(a0)
  ISTORE    x12, (RISCV_ISIZE * 12)(a0)
  ISTORE    x13, (RISCV_ISIZE * 13)(a0)
  ISTORE    x14, (RISCV_ISIZE * 14)(a0)
  ISTORE    x15, (RISCV_ISIZE * 15)(a0)
  ISTORE    x16, (RISCV_ISIZE * 16)(a0)
  ISTORE    x17, (RISCV_ISIZE * 17)(a0)
  ISTORE    x18, (RISCV_ISIZE * 18)(a0)
  ISTORE    x19, (RISCV_ISIZE * 19)(a0)
  ISTORE    x20, (RISCV_ISIZE * 20)(a0)
  ISTORE    x21, (RISCV_ISIZE * 21)(a0)
  ISTORE    x22, (RISCV_ISIZE * 22)(a0)
  ISTORE    x23, (RISCV_ISIZE * 23)(a0)
  ISTORE    x24, (RISCV_ISIZE * 24)(a0)
  ISTORE    x25, (RISCV_ISIZE * 25)(a0)
  ISTORE    x26, (RISCV_ISIZE * 26)(a0)
  ISTORE    x27, (RISCV_ISIZE * 27)(a0)
  ISTORE    x28, (RISCV_ISIZE * 28)(a0)
  ISTORE    x29, (RISCV_ISIZE * 29)(a0)
  ISTORE    x30, (RISCV_ISIZE * 30)(a0)
  ISTORE    x31, (RISCV_ISIZE * 31)(a0)

# if defined(__riscv_flen)
  FSTORE    f0, (RISCV_FOFFSET + RISCV_FSIZE * 0)(a0)
  FSTORE    f1, (RISCV_FOFFSET + RISCV_FSIZE * 1)(a0)
  FSTORE    f2, (RISCV_FOFFSET + RISCV_FSIZE * 2)(a0)
  FSTORE    f3, (RISCV_FOFFSET + RISCV_FSIZE * 3)(a0)
  FSTORE    f4, (RISCV_FOFFSET + RISCV_FSIZE * 4)(a0)
  FSTORE    f5, (RISCV_FOFFSET + RISCV_FSIZE * 5)(a0)
  FSTORE    f6, (RISCV_FOFFSET + RISCV_FSIZE * 6)(a0)
  FSTORE    f7, (RISCV_FOFFSET + RISCV_FSIZE * 7)(a0)
  FSTORE    f8, (RISCV_FOFFSET + RISCV_FSIZE * 8)(a0)
  FSTORE    f9, (RISCV_FOFFSET + RISCV_FSIZE * 9)(a0)
  FSTORE    f10, (RISCV_FOFFSET + RISCV_FSIZE * 10)(a0)
  FSTORE    f11, (RISCV_FOFFSET + RISCV_FSIZE * 11)(a0)
  FSTORE    f12, (RISCV_FOFFSET + RISCV_FSIZE * 12)(a0)
  FSTORE    f13, (RISCV_FOFFSET + RISCV_FSIZE * 13)(a0)
  FSTORE    f14, (RISCV_FOFFSET + RISCV_FSIZE * 14)(a0)
  FSTORE    f15, (RISCV_FOFFSET + RISCV_FSIZE * 15)(a0)
  FSTORE    f16, (RISCV_FOFFSET + RISCV_FSIZE * 16)(a0)
  FSTORE    f17, (RISCV_FOFFSET + RISCV_FSIZE * 17)(a0)
  FSTORE    f18, (RISCV_FOFFSET + RISCV_FSIZE * 18)(a0)
  FSTORE    f19, (RISCV_FOFFSET + RISCV_FSIZE * 19)(a0)
  FSTORE    f20, (RISCV_FOFFSET + RISCV_FSIZE * 20)(a0)
  FSTORE    f21, (RISCV_FOFFSET + RISCV_FSIZE * 21)(a0)
  FSTORE    f22, (RISCV_FOFFSET + RISCV_FSIZE * 22)(a0)
  FSTORE    f23, (RISCV_FOFFSET + RISCV_FSIZE * 23)(a0)
  FSTORE    f24, (RISCV_FOFFSET + RISCV_FSIZE * 24)(a0)
  FSTORE    f25, (RISCV_FOFFSET + RISCV_FSIZE * 25)(a0)
  FSTORE    f26, (RISCV_FOFFSET + RISCV_FSIZE * 26)(a0)
  FSTORE    f27, (RISCV_FOFFSET + RISCV_FSIZE * 27)(a0)
  FSTORE    f28, (RISCV_FOFFSET + RISCV_FSIZE * 28)(a0)
  FSTORE    f29, (RISCV_FOFFSET + RISCV_FSIZE * 29)(a0)
  FSTORE    f30, (RISCV_FOFFSET + RISCV_FSIZE * 30)(a0)
  FSTORE    f31, (RISCV_FOFFSET + RISCV_FSIZE * 31)(a0)
# endif

  li     a0, 0  // return UNW_ESUCCESS
  ret           // jump to ra
#endif

  WEAK_ALIAS(__unw_getcontext, unw_getcontext)
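  // WEAK_ALIAS (from assembly.h) makes the public name unw_getcontext a
  // weak alias for the __unw_getcontext implementations above.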

#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */

NO_EXEC_STACK_DIRECTIVE
