/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef _UAPI__ASM_SIGCONTEXT_H
#define _UAPI__ASM_SIGCONTEXT_H

#ifndef __ASSEMBLY__

#include <linux/types.h>

/*
 * Signal context structure - contains all info to do with the state
 * before the signal handler was invoked.
 */
struct sigcontext {
	__u64 fault_address;
	/* AArch64 registers */
	__u64 regs[31];
	__u64 sp;
	__u64 pc;
	__u64 pstate;
	/* 4K reserved for FP/SIMD state and future expansion */
	__u8 __reserved[4096] __attribute__((__aligned__(16)));
};
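
/*
 * Usage sketch (illustrative only, not part of this header): on AArch64 the
 * mcontext_t embedded in the C library's ucontext_t typically shares this
 * layout, so a SA_SIGINFO handler can read the interrupted state roughly as
 * follows.  ucontext_t and uc_mcontext come from <ucontext.h>, not from this
 * file.
 *
 *	void handler(int sig, siginfo_t *info, void *ucp)
 *	{
 *		ucontext_t *uc = ucp;
 *		struct sigcontext *sc = (struct sigcontext *)&uc->uc_mcontext;
 *		__u64 pc = sc->pc;
 *		__u64 sp = sc->sp;
 *		__u64 x0 = sc->regs[0];
 *
 *		The optional records described below live in sc->__reserved[].
 *	}
 */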

/*
 * Allocation of __reserved[]:
 * (Note: records do not necessarily occur in the order shown here.)
 *
 *	size		description
 *
 *	0x210		fpsimd_context
 *	 0x10		esr_context
 *	0x8a0		sve_context (vl <= 64) (optional)
 *	 0x20		extra_context (optional)
 *	 0x10		terminator (null _aarch64_ctx)
 *
 *	0x510		(reserved for future allocation)
 *
 * New records that can exceed this space need to be opt-in for userspace, so
 * that an expanded signal frame is not generated unexpectedly.  The mechanism
 * for opting in will depend on the extension that generates each new record.
 * The above table documents the maximum set and sizes of records that can be
 * generated when userspace does not opt in for any such extension.
 */

/*
 * Header to be used at the beginning of structures extending the user
 * context. Such structures must be placed after the rt_sigframe on the stack
 * and be 16-byte aligned. The last structure must be a dummy one with the
 * magic and size set to 0.
 *
 * Note that the values allocated for use as magic should be chosen to
 * be meaningful in ASCII to aid manual parsing; ZA doesn't follow this
 * convention due to an oversight, but it should be observed for future
 * additions.
 */
struct _aarch64_ctx {
	__u32 magic;
	__u32 size;
};
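
/*
 * Parsing sketch (illustrative only): the records in sigcontext.__reserved[]
 * form a list that can be walked by advancing head.size bytes at a time
 * until the terminator (magic == 0, size == 0) is reached.  "sc" is assumed
 * to point at the struct sigcontext being examined.
 *
 *	struct _aarch64_ctx *head = (struct _aarch64_ctx *)sc->__reserved;
 *	struct fpsimd_context *fp = NULL;
 *
 *	while (head->magic != 0 || head->size != 0) {
 *		if (head->magic == FPSIMD_MAGIC)
 *			fp = (struct fpsimd_context *)head;
 *		head = (struct _aarch64_ctx *)((char *)head + head->size);
 *	}
 *
 * Records with an unrecognised magic are simply skipped via head.size.  A
 * robust parser should also reject a zero or misaligned size and any record
 * that would run past the space available to it.
 */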

#define FPSIMD_MAGIC	0x46508001

struct fpsimd_context {
	struct _aarch64_ctx head;
	__u32 fpsr;
	__u32 fpcr;
	__uint128_t vregs[32];
};

/*
 * Note: similarly to all other integer fields, each V-register is stored in an
 * endianness-dependent format, with the byte at offset i from the start of the
 * in-memory representation of the register value containing
 *
 *    bits [(7 + 8 * i) : (8 * i)] of the register on little-endian hosts; or
 *    bits [(127 - 8 * i) : (120 - 8 * i)] on big-endian hosts.
 */
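
/*
 * A consequence of the note above, as a sketch (assuming "fp" points at a
 * valid fpsimd_context): the layout matches the host's native 128-bit
 * integer representation, so reading vregs[n] as a __uint128_t yields the
 * architectural value on either endianness, and byte i of Vn can be obtained
 * by shifting rather than by indexing raw memory:
 *
 *	__uint128_t v = fp->vregs[n];
 *	__u8 byte_i = (__u8)(v >> (8 * i));
 *	__u64 low64 = (__u64)v;
 */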

/* ESR_EL1 context */
#define ESR_MAGIC	0x45535201

struct esr_context {
	struct _aarch64_ctx head;
	__u64 esr;
};

#define POE_MAGIC	0x504f4530

struct poe_context {
	struct _aarch64_ctx head;
	__u64 por_el0;
};

/*
 * extra_context: describes extra space in the signal frame for
 * additional structures that don't fit in sigcontext.__reserved[].
 *
 * Note:
 *
 * 1) fpsimd_context, esr_context and extra_context must be placed in
 * sigcontext.__reserved[] if present.  They cannot be placed in the
 * extra space.  Any other record can be placed either in the extra
 * space or in sigcontext.__reserved[], unless otherwise specified in
 * this file.
 *
 * 2) There must not be more than one extra_context.
 *
 * 3) If extra_context is present, it must be followed immediately in
 * sigcontext.__reserved[] by the terminating null _aarch64_ctx.
 *
 * 4) The extra space to which datap points must start at the first
 * 16-byte aligned address immediately after the terminating null
 * _aarch64_ctx that follows the extra_context structure in
 * __reserved[].  The extra space may overrun the end of __reserved[],
 * as indicated by a sufficiently large value for the size field.
 *
 * 5) The extra space must itself be terminated with a null
 * _aarch64_ctx.
 */
#define EXTRA_MAGIC	0x45585401

struct extra_context {
	struct _aarch64_ctx head;
	__u64 datap; /* 16-byte aligned pointer to extra space cast to __u64 */
	__u32 size; /* size in bytes of the extra space */
	__u32 __reserved[3];
};
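
/*
 * Continuation sketch (illustrative only): when the record walk shown
 * earlier finds an EXTRA_MAGIC record, parsing continues in the extra space
 * rather than stopping at the terminator that follows it in __reserved[]:
 *
 *	struct extra_context *extra = (struct extra_context *)head;
 *	struct _aarch64_ctx *more =
 *		(struct _aarch64_ctx *)(unsigned long)extra->datap;
 *
 * The records starting at "more" are then walked in the same way, staying
 * within extra->size bytes, until the null terminator of the extra space is
 * reached.
 */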

#define SVE_MAGIC	0x53564501

struct sve_context {
	struct _aarch64_ctx head;
	__u16 vl;
	__u16 flags;
	__u16 __reserved[2];
};

#define SVE_SIG_FLAG_SM	0x1	/* Context describes streaming mode */

/* TPIDR2_EL0 context */
#define TPIDR2_MAGIC	0x54504902

struct tpidr2_context {
	struct _aarch64_ctx head;
	__u64 tpidr2;
};

/* FPMR context */
#define FPMR_MAGIC	0x46504d52

struct fpmr_context {
	struct _aarch64_ctx head;
	__u64 fpmr;
};

#define ZA_MAGIC	0x54366345

struct za_context {
	struct _aarch64_ctx head;
	__u16 vl;
	__u16 __reserved[3];
};

#define ZT_MAGIC	0x5a544e01

struct zt_context {
	struct _aarch64_ctx head;
	__u16 nregs;
	__u16 __reserved[3];
};

#define GCS_MAGIC	0x47435300

struct gcs_context {
	struct _aarch64_ctx head;
	__u64 gcspr;
	__u64 features_enabled;
	__u64 reserved;
};

#endif /* !__ASSEMBLY__ */

#include <asm/sve_context.h>

/*
 * The SVE architecture leaves space for future expansion of the
 * vector length beyond its initial architectural limit of 2048 bits
 * (16 quadwords).
 *
 * See linux/Documentation/arch/arm64/sve.rst for a description of the VL/VQ
 * terminology.
 */
#define SVE_VQ_BYTES		__SVE_VQ_BYTES	/* bytes per quadword */

#define SVE_VQ_MIN		__SVE_VQ_MIN
#define SVE_VQ_MAX		__SVE_VQ_MAX

#define SVE_VL_MIN		__SVE_VL_MIN
#define SVE_VL_MAX		__SVE_VL_MAX

#define SVE_NUM_ZREGS		__SVE_NUM_ZREGS
#define SVE_NUM_PREGS		__SVE_NUM_PREGS

#define sve_vl_valid(vl)	__sve_vl_valid(vl)
#define sve_vq_from_vl(vl)	__sve_vq_from_vl(vl)
#define sve_vl_from_vq(vq)	__sve_vl_from_vq(vq)
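
/*
 * For example, a 256-bit vector length corresponds to vl == 32 bytes:
 * sve_vl_valid(32) is true, sve_vq_from_vl(32) == 2 (two 128-bit quadwords)
 * and sve_vl_from_vq(2) == 32.
 */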

/*
 * If the SVE registers are currently live for the thread at signal delivery,
 * sve_context.head.size >=
 *	SVE_SIG_CONTEXT_SIZE(sve_vq_from_vl(sve_context.vl))
 * and the register data may be accessed using the SVE_SIG_*() macros.
 *
 * If sve_context.head.size <
 *	SVE_SIG_CONTEXT_SIZE(sve_vq_from_vl(sve_context.vl)),
 * the SVE registers were not live for the thread and no register data
 * is included: in this case, the SVE_SIG_*() macros should not be
 * used except for this check.
 *
 * The same convention applies when returning from a signal: a caller
 * will need to remove or resize the sve_context block if it wants to
 * make the SVE registers live when they were previously non-live or
 * vice-versa.  This may require the caller to allocate fresh
 * memory and/or move other context blocks in the signal frame.
 *
 * Changing the vector length during signal return is not permitted:
 * sve_context.vl must equal the thread's current vector length when
 * doing a sigreturn.
 *
 * On systems with support for SME the SVE register state may reflect either
 * streaming or non-streaming mode.  In streaming mode the streaming mode
 * vector length will be used and the flag SVE_SIG_FLAG_SM will be set in
 * the flags field. It is permitted to enter or leave streaming mode in
 * a signal return; applications should take care to ensure that any difference
 * in vector length between the two modes is handled, including any resizing
 * and movement of context blocks.
 *
 * Note: for all these macros, the "vq" argument denotes the vector length
 * in quadwords (i.e., units of 128 bits).
 *
 * The correct way to obtain vq is to use sve_vq_from_vl(vl).  The
 * result is valid if and only if sve_vl_valid(vl) is true.  This is
 * guaranteed for a struct sve_context written by the kernel.
 *
 *
 * Additional macros describe the contents and layout of the payload.
 * For each, SVE_SIG_x_OFFSET(args) is the start offset relative to
 * the start of struct sve_context, and SVE_SIG_x_SIZE(args) is the
 * size in bytes:
 *
 *	x	type				description
 *	-	----				-----------
 *	REGS					the entire SVE context
 *
 *	ZREGS	__uint128_t[SVE_NUM_ZREGS][vq]	all Z-registers
 *	ZREG	__uint128_t[vq]			individual Z-register Zn
 *
 *	PREGS	uint16_t[SVE_NUM_PREGS][vq]	all P-registers
 *	PREG	uint16_t[vq]			individual P-register Pn
 *
 *	FFR	uint16_t[vq]			first-fault status register
 *
 * Additional data might be appended in the future.
 *
 * Unlike vregs[] in fpsimd_context, each SVE scalable register (Z-, P- or FFR)
 * is encoded in memory in an endianness-invariant format, with the byte at
 * offset i from the start of the in-memory representation containing bits
 * [(7 + 8 * i) : (8 * i)] of the register value.
 */

#define SVE_SIG_ZREG_SIZE(vq)	__SVE_ZREG_SIZE(vq)
#define SVE_SIG_PREG_SIZE(vq)	__SVE_PREG_SIZE(vq)
#define SVE_SIG_FFR_SIZE(vq)	__SVE_FFR_SIZE(vq)

#define SVE_SIG_REGS_OFFSET					\
	((sizeof(struct sve_context) + (__SVE_VQ_BYTES - 1))	\
		/ __SVE_VQ_BYTES * __SVE_VQ_BYTES)

#define SVE_SIG_ZREGS_OFFSET \
		(SVE_SIG_REGS_OFFSET + __SVE_ZREGS_OFFSET)
#define SVE_SIG_ZREG_OFFSET(vq, n) \
		(SVE_SIG_REGS_OFFSET + __SVE_ZREG_OFFSET(vq, n))
#define SVE_SIG_ZREGS_SIZE(vq) __SVE_ZREGS_SIZE(vq)

#define SVE_SIG_PREGS_OFFSET(vq) \
		(SVE_SIG_REGS_OFFSET + __SVE_PREGS_OFFSET(vq))
#define SVE_SIG_PREG_OFFSET(vq, n) \
		(SVE_SIG_REGS_OFFSET + __SVE_PREG_OFFSET(vq, n))
#define SVE_SIG_PREGS_SIZE(vq) __SVE_PREGS_SIZE(vq)

#define SVE_SIG_FFR_OFFSET(vq) \
		(SVE_SIG_REGS_OFFSET + __SVE_FFR_OFFSET(vq))

#define SVE_SIG_REGS_SIZE(vq) \
		(__SVE_FFR_OFFSET(vq) + __SVE_FFR_SIZE(vq))

#define SVE_SIG_CONTEXT_SIZE(vq) \
		(SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq))
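
/*
 * Usage sketch (illustrative only, assuming "sve" points at a struct
 * sve_context located by walking the records as described earlier): check
 * whether register data is present and, if so, find the image of Z0.
 *
 *	unsigned int vq = sve_vq_from_vl(sve->vl);
 *
 *	if (sve->head.size >= SVE_SIG_CONTEXT_SIZE(vq)) {
 *		void *z0 = (char *)sve + SVE_SIG_ZREG_OFFSET(vq, 0);
 *
 *		Z0 occupies SVE_SIG_ZREG_SIZE(vq) bytes starting at z0, in
 *		the endianness-invariant layout described above.
 *	}
 *
 * sve->vl is valid for a kernel-written record; a defensive parser would
 * still check sve_vl_valid(sve->vl) before converting it.
 */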

/*
 * If the ZA register is enabled for the thread at signal delivery then
 * za_context.head.size >= ZA_SIG_CONTEXT_SIZE(sve_vq_from_vl(za_context.vl))
 * and the register data may be accessed using the ZA_SIG_*() macros.
 *
 * If za_context.head.size < ZA_SIG_CONTEXT_SIZE(sve_vq_from_vl(za_context.vl)),
 * the ZA register was not enabled for the thread and no register data is
 * included: in this case, the ZA_SIG_*() macros should not be used except
 * for this check.
 *
 * The same convention applies when returning from a signal: a caller
 * will need to remove or resize the za_context block if it wants to
 * enable the ZA register when it was previously non-live or vice-versa.
 * This may require the caller to allocate fresh memory and/or move other
 * context blocks in the signal frame.
 *
 * Changing the vector length during signal return is not permitted:
 * za_context.vl must equal the thread's current SME vector length when
 * doing a sigreturn.
 */

#define ZA_SIG_REGS_OFFSET					\
	((sizeof(struct za_context) + (__SVE_VQ_BYTES - 1))	\
		/ __SVE_VQ_BYTES * __SVE_VQ_BYTES)

#define ZA_SIG_REGS_SIZE(vq) (((vq) * __SVE_VQ_BYTES) * ((vq) * __SVE_VQ_BYTES))

#define ZA_SIG_ZAV_OFFSET(vq, n) (ZA_SIG_REGS_OFFSET + \
				  (SVE_SIG_ZREG_SIZE(vq) * (n)))

#define ZA_SIG_CONTEXT_SIZE(vq) \
		(ZA_SIG_REGS_OFFSET + ZA_SIG_REGS_SIZE(vq))
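
/*
 * Usage sketch (illustrative only, assuming "za" points at a struct
 * za_context and vq == sve_vq_from_vl(za->vl)): when
 * za->head.size >= ZA_SIG_CONTEXT_SIZE(vq), horizontal slice n of ZA is the
 * (vq * __SVE_VQ_BYTES)-byte block starting at
 * (char *)za + ZA_SIG_ZAV_OFFSET(vq, n), and the whole array occupies
 * ZA_SIG_REGS_SIZE(vq) bytes starting at ZA_SIG_REGS_OFFSET.
 */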

#define ZT_SIG_REG_SIZE 512

#define ZT_SIG_REG_BYTES (ZT_SIG_REG_SIZE / 8)

#define ZT_SIG_REGS_OFFSET sizeof(struct zt_context)

#define ZT_SIG_REGS_SIZE(n) (ZT_SIG_REG_BYTES * (n))

#define ZT_SIG_CONTEXT_SIZE(n) \
	(sizeof(struct zt_context) + ZT_SIG_REGS_SIZE(n))
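
/*
 * For example, with a single 512-bit ZT register (zt_context.nregs == 1),
 * ZT_SIG_REG_BYTES is 64 and ZT_SIG_CONTEXT_SIZE(1) is
 * sizeof(struct zt_context) + 64; the register data starts
 * ZT_SIG_REGS_OFFSET bytes from the start of the record.
 */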

#endif /* _UAPI__ASM_SIGCONTEXT_H */