xref: /linux/arch/loongarch/kernel/signal.c (revision a361474ba3b3c6bdca7bad72dfd2ffb4f11e8e1d)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Author: Hanlu Li <lihanlu@loongson.cn>
4  *         Huacai Chen <chenhuacai@loongson.cn>
5  * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
6  *
7  * Derived from MIPS:
8  * Copyright (C) 1991, 1992  Linus Torvalds
9  * Copyright (C) 1994 - 2000  Ralf Baechle
10  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
11  * Copyright (C) 2014, Imagination Technologies Ltd.
12  */
13 #include <linux/audit.h>
14 #include <linux/cache.h>
15 #include <linux/context_tracking.h>
16 #include <linux/entry-common.h>
17 #include <linux/irqflags.h>
18 #include <linux/rseq.h>
19 #include <linux/sched.h>
20 #include <linux/mm.h>
21 #include <linux/personality.h>
22 #include <linux/smp.h>
23 #include <linux/kernel.h>
24 #include <linux/signal.h>
25 #include <linux/errno.h>
26 #include <linux/wait.h>
27 #include <linux/ptrace.h>
28 #include <linux/unistd.h>
29 #include <linux/compiler.h>
30 #include <linux/syscalls.h>
31 #include <linux/uaccess.h>
32 
33 #include <asm/asm.h>
34 #include <asm/cacheflush.h>
35 #include <asm/cpu-features.h>
36 #include <asm/fpu.h>
37 #include <asm/lbt.h>
38 #include <asm/sigframe.h>
39 #include <asm/ucontext.h>
40 #include <asm/vdso.h>
41 
/* Verbose signal-delivery tracing, compiled out unless DEBUG_SIG is defined. */
#ifdef DEBUG_SIG
#  define DEBUGP(fmt, args...) printk("%s: " fmt, __func__, ##args)
#else
#  define DEBUGP(fmt, args...)
#endif
47 
/*
 * Pin FPU/LBT ownership while saving/restoring context: disabling
 * preemption keeps the hardware state bound to this task, and disabling
 * page faults makes the uaccess inside the critical section fail fast
 * instead of sleeping (the protected_* callers below then touch the
 * user memory with faults enabled and retry).
 */
/* Make sure we will not lose FPU ownership */
#define lock_fpu_owner()	({ preempt_disable(); pagefault_disable(); })
#define unlock_fpu_owner()	({ pagefault_enable(); preempt_enable(); })
/* Make sure we will not lose LBT ownership */
#define lock_lbt_owner()	({ preempt_disable(); pagefault_disable(); })
#define unlock_lbt_owner()	({ pagefault_enable(); preempt_enable(); })
54 
/* Location and size of one extended-context record inside the sigframe. */
struct _ctx_layout {
	struct sctx_info *addr;
	unsigned int size;
};
59 
/*
 * Computed layout of a sigframe's extended-context area: total byte size,
 * the sc_flags value, and one record per context type. A record with
 * addr == NULL means that context is absent from the frame.
 */
struct extctx_layout {
	unsigned long size;
	unsigned int flags;
	struct _ctx_layout fpu;
	struct _ctx_layout lsx;
	struct _ctx_layout lasx;
	struct _ctx_layout lbt;
	struct _ctx_layout end;
};
69 
get_ctx_through_ctxinfo(struct sctx_info * info)70 static void __user *get_ctx_through_ctxinfo(struct sctx_info *info)
71 {
72 	return (void __user *)((char *)info + sizeof(struct sctx_info));
73 }
74 
75 /*
76  * Thread saved context copy to/from a signal context presumed to be on the
77  * user stack, and therefore accessed with appropriate macros from uaccess.h.
78  */
/*
 * Copy the task's software-saved FPU state (NUM_FPU_REGS 64-bit registers,
 * fcc, fcsr) into the user-stack sigcontext. Returns 0, or the OR of the
 * __put_user() error codes. Caller runs this with page faults disabled.
 */
static int copy_fpu_to_sigcontext(struct fpu_context __user *ctx)
{
	int i;
	int err = 0;
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint64_t __user *fcc	= &ctx->fcc;
	uint32_t __user *fcsr	= &ctx->fcsr;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |=
		    __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
			       &regs[i]);
	}
	err |= __put_user(current->thread.fpu.fcc, fcc);
	err |= __put_user(current->thread.fpu.fcsr, fcsr);

	return err;
}
97 
/*
 * Load the task's software FPU state from the user-stack sigcontext;
 * the reverse of copy_fpu_to_sigcontext(). fcc/fcsr are read directly
 * into current->thread.fpu. Returns the OR of __get_user() errors.
 */
static int copy_fpu_from_sigcontext(struct fpu_context __user *ctx)
{
	int i;
	int err = 0;
	u64 fpr_val;
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint64_t __user *fcc	= &ctx->fcc;
	uint32_t __user *fcsr	= &ctx->fcsr;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |= __get_user(fpr_val, &regs[i]);
		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
	}
	err |= __get_user(current->thread.fpu.fcc, fcc);
	err |= __get_user(current->thread.fpu.fcsr, fcsr);

	return err;
}
116 
/*
 * Copy the task's LSX (128-bit vector) state to the sigcontext. Each
 * vector register is stored as two consecutive 64-bit words, so register
 * i occupies regs[2*i] and regs[2*i+1].
 */
static int copy_lsx_to_sigcontext(struct lsx_context __user *ctx)
{
	int i;
	int err = 0;
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint64_t __user *fcc	= &ctx->fcc;
	uint32_t __user *fcsr	= &ctx->fcsr;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
				  &regs[2*i]);
		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
				  &regs[2*i+1]);
	}
	err |= __put_user(current->thread.fpu.fcc, fcc);
	err |= __put_user(current->thread.fpu.fcsr, fcsr);

	return err;
}
136 
/*
 * Load the task's LSX state from the sigcontext; the reverse of
 * copy_lsx_to_sigcontext() (two 64-bit words per vector register).
 */
static int copy_lsx_from_sigcontext(struct lsx_context __user *ctx)
{
	int i;
	int err = 0;
	u64 fpr_val;
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint64_t __user *fcc	= &ctx->fcc;
	uint32_t __user *fcsr	= &ctx->fcsr;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |= __get_user(fpr_val, &regs[2*i]);
		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
		err |= __get_user(fpr_val, &regs[2*i+1]);
		set_fpr64(&current->thread.fpu.fpr[i], 1, fpr_val);
	}
	err |= __get_user(current->thread.fpu.fcc, fcc);
	err |= __get_user(current->thread.fpu.fcsr, fcsr);

	return err;
}
157 
/*
 * Copy the task's LASX (256-bit vector) state to the sigcontext. Each
 * vector register is stored as four consecutive 64-bit words, so
 * register i occupies regs[4*i] .. regs[4*i+3].
 */
static int copy_lasx_to_sigcontext(struct lasx_context __user *ctx)
{
	int i;
	int err = 0;
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint64_t __user *fcc	= &ctx->fcc;
	uint32_t __user *fcsr	= &ctx->fcsr;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
				  &regs[4*i]);
		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
				  &regs[4*i+1]);
		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 2),
				  &regs[4*i+2]);
		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 3),
				  &regs[4*i+3]);
	}
	err |= __put_user(current->thread.fpu.fcc, fcc);
	err |= __put_user(current->thread.fpu.fcsr, fcsr);

	return err;
}
181 
/*
 * Load the task's LASX state from the sigcontext; the reverse of
 * copy_lasx_to_sigcontext() (four 64-bit words per vector register).
 */
static int copy_lasx_from_sigcontext(struct lasx_context __user *ctx)
{
	int i;
	int err = 0;
	u64 fpr_val;
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint64_t __user *fcc	= &ctx->fcc;
	uint32_t __user *fcsr	= &ctx->fcsr;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |= __get_user(fpr_val, &regs[4*i]);
		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
		err |= __get_user(fpr_val, &regs[4*i+1]);
		set_fpr64(&current->thread.fpu.fpr[i], 1, fpr_val);
		err |= __get_user(fpr_val, &regs[4*i+2]);
		set_fpr64(&current->thread.fpu.fpr[i], 2, fpr_val);
		err |= __get_user(fpr_val, &regs[4*i+3]);
		set_fpr64(&current->thread.fpu.fpr[i], 3, fpr_val);
	}
	err |= __get_user(current->thread.fpu.fcc, fcc);
	err |= __get_user(current->thread.fpu.fcsr, fcsr);

	return err;
}
206 
207 #ifdef CONFIG_CPU_HAS_LBT
/*
 * Copy the task's LBT (binary translation) scratch registers scr0-scr3
 * and eflags to the sigcontext (ftop is handled separately, see
 * copy_ftop_to_sigcontext()).
 */
static int copy_lbt_to_sigcontext(struct lbt_context __user *ctx)
{
	int err = 0;
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint32_t __user *eflags	= (uint32_t *)&ctx->eflags;

	err |= __put_user(current->thread.lbt.scr0, &regs[0]);
	err |= __put_user(current->thread.lbt.scr1, &regs[1]);
	err |= __put_user(current->thread.lbt.scr2, &regs[2]);
	err |= __put_user(current->thread.lbt.scr3, &regs[3]);
	err |= __put_user(current->thread.lbt.eflags, eflags);

	return err;
}
222 
/*
 * Load the task's LBT scratch registers and eflags from the sigcontext;
 * the reverse of copy_lbt_to_sigcontext().
 */
static int copy_lbt_from_sigcontext(struct lbt_context __user *ctx)
{
	int err = 0;
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint32_t __user *eflags	= (uint32_t *)&ctx->eflags;

	err |= __get_user(current->thread.lbt.scr0, &regs[0]);
	err |= __get_user(current->thread.lbt.scr1, &regs[1]);
	err |= __get_user(current->thread.lbt.scr2, &regs[2]);
	err |= __get_user(current->thread.lbt.scr3, &regs[3]);
	err |= __get_user(current->thread.lbt.eflags, eflags);

	return err;
}
237 
/*
 * Store the x87 top-of-stack pointer (kept in thread.fpu, not thread.lbt)
 * into the LBT sigcontext record.
 */
static int copy_ftop_to_sigcontext(struct lbt_context __user *ctx)
{
	uint32_t  __user *ftop	= &ctx->ftop;

	return __put_user(current->thread.fpu.ftop, ftop);
}
244 
/*
 * Load the x87 top-of-stack pointer from the LBT sigcontext record into
 * thread.fpu; the reverse of copy_ftop_to_sigcontext().
 */
static int copy_ftop_from_sigcontext(struct lbt_context __user *ctx)
{
	uint32_t  __user *ftop	= &ctx->ftop;

	return __get_user(current->thread.fpu.ftop, ftop);
}
251 #endif
252 
253 /*
254  * Wrappers for the assembly _{save,restore}_fp_context functions.
255  */
/* Dump the live hardware FPU state straight into the user sigcontext. */
static int save_hw_fpu_context(struct fpu_context __user *ctx)
{
	return _save_fp_context((uint64_t __user *)&ctx->regs,
				&ctx->fcc, &ctx->fcsr);
}
264 
/* Load the hardware FPU state straight from the user sigcontext. */
static int restore_hw_fpu_context(struct fpu_context __user *ctx)
{
	return _restore_fp_context((uint64_t __user *)&ctx->regs,
				   &ctx->fcc, &ctx->fcsr);
}
273 
/* Dump the live hardware LSX state straight into the user sigcontext. */
static int save_hw_lsx_context(struct lsx_context __user *ctx)
{
	return _save_lsx_context((uint64_t __user *)&ctx->regs,
				 &ctx->fcc, &ctx->fcsr);
}
282 
/* Load the hardware LSX state straight from the user sigcontext. */
static int restore_hw_lsx_context(struct lsx_context __user *ctx)
{
	return _restore_lsx_context((uint64_t __user *)&ctx->regs,
				    &ctx->fcc, &ctx->fcsr);
}
291 
/* Dump the live hardware LASX state straight into the user sigcontext. */
static int save_hw_lasx_context(struct lasx_context __user *ctx)
{
	return _save_lasx_context((uint64_t __user *)&ctx->regs,
				  &ctx->fcc, &ctx->fcsr);
}
300 
/* Load the hardware LASX state straight from the user sigcontext. */
static int restore_hw_lasx_context(struct lasx_context __user *ctx)
{
	return _restore_lasx_context((uint64_t __user *)&ctx->regs,
				     &ctx->fcc, &ctx->fcsr);
}
309 
310 /*
311  * Wrappers for the assembly _{save,restore}_lbt_context functions.
312  */
313 #ifdef CONFIG_CPU_HAS_LBT
/* Dump the live hardware LBT state straight into the user sigcontext. */
static int save_hw_lbt_context(struct lbt_context __user *ctx)
{
	return _save_lbt_context((uint64_t __user *)&ctx->regs,
				 (uint32_t __user *)&ctx->eflags);
}
321 
/* Load the hardware LBT state straight from the user sigcontext. */
static int restore_hw_lbt_context(struct lbt_context __user *ctx)
{
	return _restore_lbt_context((uint64_t __user *)&ctx->regs,
				    (uint32_t __user *)&ctx->eflags);
}
329 
/* Dump the hardware x87 top-of-stack pointer into the user sigcontext. */
static int save_hw_ftop_context(struct lbt_context __user *ctx)
{
	return _save_ftop_context(&ctx->ftop);
}
336 
/* Load the hardware x87 top-of-stack pointer from the user sigcontext. */
static int restore_hw_ftop_context(struct lbt_context __user *ctx)
{
	return _restore_ftop_context(&ctx->ftop);
}
343 #endif
344 
/*
 * Sanitize the user-supplied FCSR image before it is loaded: if any
 * exception cause bit whose matching enable bit is set is pending, clear
 * it in place and report SIGFPE (the enable bits shifted left by 24 line
 * up with the cause bits -- NOTE(review): assumes the LoongArch FCSR bit
 * layout, confirm against FPU_CSR_ALL_E's definition).
 *
 * Returns a negative uaccess error, SIGFPE if exceptions were pending,
 * or 0 otherwise.
 */
static int fcsr_pending(unsigned int __user *fcsr)
{
	int err, sig = 0;
	unsigned int csr, enabled;

	err = __get_user(csr, fcsr);
	enabled = ((csr & FPU_CSR_ALL_E) << 24);
	/*
	 * If the signal handler set some FPU exceptions, clear it and
	 * send SIGFPE.
	 */
	if (csr & enabled) {
		csr &= ~enabled;
		err |= __put_user(csr, fcsr);
		sig = SIGFPE;
	}
	return err ?: sig;
}
363 
364 /*
365  * Helper routines
366  */
/*
 * Save the FPU state into the sigframe slot described by extctx->fpu.
 *
 * The save runs with preemption and page faults disabled (lock_fpu_owner),
 * so if the user stack page is not resident the uaccess fails instead of
 * sleeping. On failure we touch the first/last words of the context with
 * plain __put_user() (page faults enabled) to fault the page in, then
 * retry; if even the touch fails the sigframe is genuinely bad and the
 * error is returned.
 */
static int protected_save_fpu_context(struct extctx_layout *extctx)
{
	int err = 0;
	struct sctx_info __user *info = extctx->fpu.addr;
	struct fpu_context __user *fpu_ctx = (struct fpu_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs	= (uint64_t *)&fpu_ctx->regs;
	uint64_t __user *fcc	= &fpu_ctx->fcc;
	uint32_t __user *fcsr	= &fpu_ctx->fcsr;

	while (1) {
		lock_fpu_owner();
		/* Live hardware state takes the assembly fast path. */
		if (is_fpu_owner())
			err = save_hw_fpu_context(fpu_ctx);
		else
			err = copy_fpu_to_sigcontext(fpu_ctx);
		unlock_fpu_owner();

		err |= __put_user(FPU_CTX_MAGIC, &info->magic);
		err |= __put_user(extctx->fpu.size, &info->size);

		if (likely(!err))
			break;
		/* Touch the FPU context and try again */
		err = __put_user(0, &regs[0]) |
			__put_user(0, &regs[31]) |
			__put_user(0, fcc) |
			__put_user(0, fcsr);
		if (err)
			return err;	/* really bad sigcontext */
	}

	return err;
}
400 
/*
 * Restore the FPU state from the sigframe slot described by extctx->fpu,
 * using the same touch-and-retry protocol as protected_save_fpu_context()
 * (with __get_user() this time).
 *
 * Returns a negative uaccess error, SIGFPE if fcsr_pending() found
 * enabled exception bits set, or 0.
 */
static int protected_restore_fpu_context(struct extctx_layout *extctx)
{
	int err = 0, sig = 0, tmp __maybe_unused;
	struct sctx_info __user *info = extctx->fpu.addr;
	struct fpu_context __user *fpu_ctx = (struct fpu_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs	= (uint64_t *)&fpu_ctx->regs;
	uint64_t __user *fcc	= &fpu_ctx->fcc;
	uint32_t __user *fcsr	= &fpu_ctx->fcsr;

	/* Clear pending exceptions first so loading fcsr cannot trap. */
	err = sig = fcsr_pending(fcsr);
	if (err < 0)
		return err;

	while (1) {
		lock_fpu_owner();
		if (is_fpu_owner())
			err = restore_hw_fpu_context(fpu_ctx);
		else
			err = copy_fpu_from_sigcontext(fpu_ctx);
		unlock_fpu_owner();

		if (likely(!err))
			break;
		/* Touch the FPU context and try again */
		err = __get_user(tmp, &regs[0]) |
			__get_user(tmp, &regs[31]) |
			__get_user(tmp, fcc) |
			__get_user(tmp, fcsr);
		if (err)
			break;	/* really bad sigcontext */
	}

	return err ?: sig;
}
435 
/*
 * Save the LSX state into the sigframe slot described by extctx->lsx,
 * with the same touch-and-retry protocol as protected_save_fpu_context().
 * If only the scalar FPU is live in hardware, it is flushed to the thread
 * struct first so copy_lsx_to_sigcontext() sees current values.
 */
static int protected_save_lsx_context(struct extctx_layout *extctx)
{
	int err = 0;
	struct sctx_info __user *info = extctx->lsx.addr;
	struct lsx_context __user *lsx_ctx = (struct lsx_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs	= (uint64_t *)&lsx_ctx->regs;
	uint64_t __user *fcc	= &lsx_ctx->fcc;
	uint32_t __user *fcsr	= &lsx_ctx->fcsr;

	while (1) {
		lock_fpu_owner();
		if (is_lsx_enabled())
			err = save_hw_lsx_context(lsx_ctx);
		else {
			/* Sync live scalar FPU registers before copying. */
			if (is_fpu_owner())
				save_fp(current);
			err = copy_lsx_to_sigcontext(lsx_ctx);
		}
		unlock_fpu_owner();

		err |= __put_user(LSX_CTX_MAGIC, &info->magic);
		err |= __put_user(extctx->lsx.size, &info->size);

		if (likely(!err))
			break;
		/* Touch the LSX context and try again */
		err = __put_user(0, &regs[0]) |
			__put_user(0, &regs[32*2-1]) |
			__put_user(0, fcc) |
			__put_user(0, fcsr);
		if (err)
			return err;	/* really bad sigcontext */
	}

	return err;
}
472 
/*
 * Restore the LSX state from the sigframe slot described by extctx->lsx.
 * If LSX is not enabled in hardware the state is copied into the thread
 * struct and, when the scalar FPU is live, reloaded from there.
 *
 * Returns a negative uaccess error, SIGFPE from fcsr_pending(), or 0.
 */
static int protected_restore_lsx_context(struct extctx_layout *extctx)
{
	int err = 0, sig = 0, tmp __maybe_unused;
	struct sctx_info __user *info = extctx->lsx.addr;
	struct lsx_context __user *lsx_ctx = (struct lsx_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs	= (uint64_t *)&lsx_ctx->regs;
	uint64_t __user *fcc	= &lsx_ctx->fcc;
	uint32_t __user *fcsr	= &lsx_ctx->fcsr;

	err = sig = fcsr_pending(fcsr);
	if (err < 0)
		return err;

	while (1) {
		lock_fpu_owner();
		if (is_lsx_enabled())
			err = restore_hw_lsx_context(lsx_ctx);
		else {
			err = copy_lsx_from_sigcontext(lsx_ctx);
			/* Re-load hardware FPU from the updated thread struct. */
			if (is_fpu_owner())
				restore_fp(current);
		}
		unlock_fpu_owner();

		if (likely(!err))
			break;
		/* Touch the LSX context and try again */
		err = __get_user(tmp, &regs[0]) |
			__get_user(tmp, &regs[32*2-1]) |
			__get_user(tmp, fcc) |
			__get_user(tmp, fcsr);
		if (err)
			break;	/* really bad sigcontext */
	}

	return err ?: sig;
}
510 
/*
 * Save the LASX state into the sigframe slot described by extctx->lasx,
 * with the same touch-and-retry protocol as protected_save_fpu_context().
 * Whatever narrower state is live in hardware (LSX or scalar FPU) is
 * flushed to the thread struct first so the software copy is current.
 */
static int protected_save_lasx_context(struct extctx_layout *extctx)
{
	int err = 0;
	struct sctx_info __user *info = extctx->lasx.addr;
	struct lasx_context __user *lasx_ctx =
		(struct lasx_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs	= (uint64_t *)&lasx_ctx->regs;
	uint64_t __user *fcc	= &lasx_ctx->fcc;
	uint32_t __user *fcsr	= &lasx_ctx->fcsr;

	while (1) {
		lock_fpu_owner();
		if (is_lasx_enabled())
			err = save_hw_lasx_context(lasx_ctx);
		else {
			/* Sync whichever narrower unit is live. */
			if (is_lsx_enabled())
				save_lsx(current);
			else if (is_fpu_owner())
				save_fp(current);
			err = copy_lasx_to_sigcontext(lasx_ctx);
		}
		unlock_fpu_owner();

		err |= __put_user(LASX_CTX_MAGIC, &info->magic);
		err |= __put_user(extctx->lasx.size, &info->size);

		if (likely(!err))
			break;
		/* Touch the LASX context and try again */
		err = __put_user(0, &regs[0]) |
			__put_user(0, &regs[32*4-1]) |
			__put_user(0, fcc) |
			__put_user(0, fcsr);
		if (err)
			return err;	/* really bad sigcontext */
	}

	return err;
}
550 
/*
 * Restore the LASX state from the sigframe slot described by
 * extctx->lasx. If LASX is not enabled in hardware the state is copied
 * into the thread struct and whichever narrower unit is live (LSX or
 * scalar FPU) is reloaded from there.
 *
 * Returns a negative uaccess error, SIGFPE from fcsr_pending(), or 0.
 */
static int protected_restore_lasx_context(struct extctx_layout *extctx)
{
	int err = 0, sig = 0, tmp __maybe_unused;
	struct sctx_info __user *info = extctx->lasx.addr;
	struct lasx_context __user *lasx_ctx =
		(struct lasx_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs	= (uint64_t *)&lasx_ctx->regs;
	uint64_t __user *fcc	= &lasx_ctx->fcc;
	uint32_t __user *fcsr	= &lasx_ctx->fcsr;

	err = sig = fcsr_pending(fcsr);
	if (err < 0)
		return err;

	while (1) {
		lock_fpu_owner();
		if (is_lasx_enabled())
			err = restore_hw_lasx_context(lasx_ctx);
		else {
			err = copy_lasx_from_sigcontext(lasx_ctx);
			if (is_lsx_enabled())
				restore_lsx(current);
			else if (is_fpu_owner())
				restore_fp(current);
		}
		unlock_fpu_owner();

		if (likely(!err))
			break;
		/* Touch the LASX context and try again */
		err = __get_user(tmp, &regs[0]) |
			__get_user(tmp, &regs[32*4-1]) |
			__get_user(tmp, fcc) |
			__get_user(tmp, fcsr);
		if (err)
			break;	/* really bad sigcontext */
	}

	return err ?: sig;
}
591 
592 #ifdef CONFIG_CPU_HAS_LBT
/*
 * Save the LBT state (scratch registers + eflags, plus ftop which lives
 * in the FPU) into the sigframe slot described by extctx->lbt, with the
 * same touch-and-retry protocol as protected_save_fpu_context().
 */
static int protected_save_lbt_context(struct extctx_layout *extctx)
{
	int err = 0;
	struct sctx_info __user *info = extctx->lbt.addr;
	struct lbt_context __user *lbt_ctx =
		(struct lbt_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs	= (uint64_t *)&lbt_ctx->regs;
	uint32_t __user *eflags	= (uint32_t *)&lbt_ctx->eflags;

	while (1) {
		lock_lbt_owner();
		if (is_lbt_owner())
			err |= save_hw_lbt_context(lbt_ctx);
		else
			err |= copy_lbt_to_sigcontext(lbt_ctx);
		/* ftop is FPU state, so its ownership is tracked separately. */
		if (is_fpu_owner())
			err |= save_hw_ftop_context(lbt_ctx);
		else
			err |= copy_ftop_to_sigcontext(lbt_ctx);
		unlock_lbt_owner();

		err |= __put_user(LBT_CTX_MAGIC, &info->magic);
		err |= __put_user(extctx->lbt.size, &info->size);

		if (likely(!err))
			break;
		/* Touch the LBT context and try again */
		err = __put_user(0, &regs[0]) | __put_user(0, eflags);

		if (err)
			return err;
	}

	return err;
}
628 
/*
 * Restore the LBT state (scratch registers + eflags, plus ftop) from the
 * sigframe slot described by extctx->lbt, using the same touch-and-retry
 * protocol as the other protected_restore_* helpers.
 */
static int protected_restore_lbt_context(struct extctx_layout *extctx)
{
	int err = 0, tmp __maybe_unused;
	struct sctx_info __user *info = extctx->lbt.addr;
	struct lbt_context __user *lbt_ctx =
		(struct lbt_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs	= (uint64_t *)&lbt_ctx->regs;
	uint32_t __user *eflags	= (uint32_t *)&lbt_ctx->eflags;

	while (1) {
		lock_lbt_owner();
		if (is_lbt_owner())
			err |= restore_hw_lbt_context(lbt_ctx);
		else
			err |= copy_lbt_from_sigcontext(lbt_ctx);
		/* ftop is FPU state, so its ownership is tracked separately. */
		if (is_fpu_owner())
			err |= restore_hw_ftop_context(lbt_ctx);
		else
			err |= copy_ftop_from_sigcontext(lbt_ctx);
		unlock_lbt_owner();

		if (likely(!err))
			break;
		/* Touch the LBT context and try again */
		err = __get_user(tmp, &regs[0]) | __get_user(tmp, eflags);

		if (err)
			return err;
	}

	return err;
}
661 #endif
662 
/*
 * Fill in a user sigcontext from @regs: PC, flags, general-purpose
 * registers ($r0 is hardwired zero), any extended contexts that
 * setup_extcontext() allocated in *extctx, and the zero magic/size
 * record that terminates the extended-context list. Returns the OR of
 * all uaccess errors.
 */
static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
			    struct extctx_layout *extctx)
{
	int i, err = 0;
	struct sctx_info __user *info;

	err |= __put_user(regs->csr_era, &sc->sc_pc);
	err |= __put_user(extctx->flags, &sc->sc_flags);

	/* $r0 is always zero; save $r1..$r31 from the trap frame. */
	err |= __put_user(0, &sc->sc_regs[0]);
	for (i = 1; i < 32; i++)
		err |= __put_user(regs->regs[i], &sc->sc_regs[i]);

#ifdef CONFIG_CPU_HAS_LBT
	if (extctx->lbt.addr)
		err |= protected_save_lbt_context(extctx);
#endif

	/* Only the widest live vector context was allocated; save that one. */
	if (extctx->lasx.addr)
		err |= protected_save_lasx_context(extctx);
	else if (extctx->lsx.addr)
		err |= protected_save_lsx_context(extctx);
	else if (extctx->fpu.addr)
		err |= protected_save_fpu_context(extctx);

	/* Set the "end" magic */
	info = (struct sctx_info *)extctx->end.addr;
	err |= __put_user(0, &info->magic);
	err |= __put_user(0, &info->size);

	return err;
}
695 
/*
 * Walk the chain of extended-context records following sc->sc_extcontext
 * (each record is an sctx_info header followed by its payload) and
 * record the address of every recognized context in *extctx. The chain
 * is terminated by a record with magic == 0.
 *
 * Returns 0 on success, a uaccess error, or -EINVAL for an unknown magic
 * or a record too small to hold its declared payload.
 */
static int parse_extcontext(struct sigcontext __user *sc, struct extctx_layout *extctx)
{
	int err = 0;
	unsigned int magic, size;
	struct sctx_info __user *info = (struct sctx_info __user *)&sc->sc_extcontext;

	while(1) {
		err |= __get_user(magic, &info->magic);
		err |= __get_user(size, &info->size);
		if (err)
			return err;

		switch (magic) {
		case 0: /* END */
			goto done;

		case FPU_CTX_MAGIC:
			if (size < (sizeof(struct sctx_info) +
				    sizeof(struct fpu_context)))
				goto invalid;
			extctx->fpu.addr = info;
			break;

		case LSX_CTX_MAGIC:
			if (size < (sizeof(struct sctx_info) +
				    sizeof(struct lsx_context)))
				goto invalid;
			extctx->lsx.addr = info;
			break;

		case LASX_CTX_MAGIC:
			if (size < (sizeof(struct sctx_info) +
				    sizeof(struct lasx_context)))
				goto invalid;
			extctx->lasx.addr = info;
			break;

		case LBT_CTX_MAGIC:
			if (size < (sizeof(struct sctx_info) +
				    sizeof(struct lbt_context)))
				goto invalid;
			extctx->lbt.addr = info;
			break;

		default:
			goto invalid;
		}

		/* Advance by the record's self-declared size. */
		info = (struct sctx_info *)((char *)info + size);
	}

done:
	return 0;

invalid:
	return -EINVAL;
}
753 
/*
 * Restore the machine context from a user sigcontext on sigreturn:
 * parse the extended-context records, restore GPRs/PC and whichever
 * FPU/vector/LBT contexts are present. Returns 0, a negative error,
 * or a positive signal number (SIGFPE) that the caller must deliver.
 */
static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	int i, err = 0;
	struct extctx_layout extctx;

	memset(&extctx, 0, sizeof(struct extctx_layout));

	err = __get_user(extctx.flags, &sc->sc_flags);
	if (err)
		goto bad;

	err = parse_extcontext(sc, &extctx);
	if (err)
		goto bad;

	conditional_used_math(extctx.flags & SC_USED_FP);

	/*
	 * The signal handler may have used FPU; give it up if the program
	 * doesn't want it following sigreturn.
	 */
	if (!(extctx.flags & SC_USED_FP))
		lose_fpu(0);

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	err |= __get_user(regs->csr_era, &sc->sc_pc);
	/* $r0 is hardwired zero, so only $r1..$r31 are restored. */
	for (i = 1; i < 32; i++)
		err |= __get_user(regs->regs[i], &sc->sc_regs[i]);

	/* Only the widest context was saved; restore the matching one. */
	if (extctx.lasx.addr)
		err |= protected_restore_lasx_context(&extctx);
	else if (extctx.lsx.addr)
		err |= protected_restore_lsx_context(&extctx);
	else if (extctx.fpu.addr)
		err |= protected_restore_fpu_context(&extctx);

#ifdef CONFIG_CPU_HAS_LBT
	if (extctx.lbt.addr)
		err |= protected_restore_lbt_context(&extctx);
#endif

bad:
	return err;
}
800 
handle_flags(void)801 static unsigned int handle_flags(void)
802 {
803 	unsigned int flags = 0;
804 
805 	flags = used_math() ? SC_USED_FP : 0;
806 
807 	switch (current->thread.error_code) {
808 	case 1:
809 		flags |= SC_ADDRERR_RD;
810 		break;
811 	case 2:
812 		flags |= SC_ADDRERR_WR;
813 		break;
814 	}
815 
816 	return flags;
817 }
818 
/*
 * Reserve room for one extended-context record below @base on the user
 * stack: align the payload of @size bytes to at least 16 bytes (or
 * @align if larger), place the sctx_info header immediately before it,
 * record the slot in @layout, add the consumed bytes to extctx->size,
 * and return the new (lower) stack address.
 */
static unsigned long extframe_alloc(struct extctx_layout *extctx,
				    struct _ctx_layout *layout,
				    size_t size, unsigned int align, unsigned long base)
{
	unsigned long new_base;

	if (align < 16)
		align = 16;

	new_base = round_down(base - size, align) - sizeof(struct sctx_info);

	layout->addr = (void *)new_base;
	layout->size = (unsigned int)(base - new_base);
	extctx->size += layout->size;

	return new_base;
}
834 
/*
 * Plan the extended-context area of a sigframe below @sp, growing
 * downwards: the zero "end" terminator record first, then at most one
 * vector/FPU context (the widest live one: LASX > LSX > FPU), then the
 * LBT context if live. Fills in *extctx and returns the new, lower
 * stack address.
 */
static unsigned long setup_extcontext(struct extctx_layout *extctx, unsigned long sp)
{
	unsigned long new_sp = sp;

	memset(extctx, 0, sizeof(struct extctx_layout));

	extctx->flags = handle_flags();

	/* Grow down, alloc "end" context info first. */
	new_sp -= sizeof(struct sctx_info);
	extctx->end.addr = (void *)new_sp;
	extctx->end.size = (unsigned int)sizeof(struct sctx_info);
	extctx->size += extctx->end.size;

	if (extctx->flags & SC_USED_FP) {
		if (cpu_has_lasx && thread_lasx_context_live())
			new_sp = extframe_alloc(extctx, &extctx->lasx,
			  sizeof(struct lasx_context), LASX_CTX_ALIGN, new_sp);
		else if (cpu_has_lsx && thread_lsx_context_live())
			new_sp = extframe_alloc(extctx, &extctx->lsx,
			  sizeof(struct lsx_context), LSX_CTX_ALIGN, new_sp);
		else if (cpu_has_fpu)
			new_sp = extframe_alloc(extctx, &extctx->fpu,
			  sizeof(struct fpu_context), FPU_CTX_ALIGN, new_sp);
	}

#ifdef CONFIG_CPU_HAS_LBT
	if (cpu_has_lbt && thread_lbt_context_live()) {
		new_sp = extframe_alloc(extctx, &extctx->lbt,
			  sizeof(struct lbt_context), LBT_CTX_ALIGN, new_sp);
	}
#endif

	return new_sp;
}
870 
/*
 * Pick the user stack address for a new rt_sigframe: start from the
 * current sp (or the alternate signal stack per SA_ONSTACK via sigsp()),
 * lay out the extended-context area with setup_extcontext(), and make
 * room for the fixed rt_sigframe. The result is 16-byte aligned.
 */
static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
				 struct extctx_layout *extctx)
{
	unsigned long sp;

	/* Default to using normal stack */
	sp = regs->regs[3];

	/*
	 * If we are on the alternate signal stack and would overflow it, don't.
	 * Return an always-bogus address instead so we will die with SIGSEGV.
	 */
	if (on_sig_stack(sp) &&
	    !likely(on_sig_stack(sp - sizeof(struct rt_sigframe))))
		return (void __user __force *)(-1UL);

	sp = sigsp(sp, ksig);
	sp = round_down(sp, 16);
	sp = setup_extcontext(extctx, sp);
	sp -= sizeof(struct rt_sigframe);

	/* ABI requires a 16-byte aligned stack; the math above preserves it. */
	if (!IS_ALIGNED(sp, 16))
		BUG();

	return (void __user *)sp;
}
897 
898 /*
899  * Atomically swap in the new signal mask, and wait for a signal.
900  */
901 
/*
 * sys_rt_sigreturn: undo what setup_rt_frame() did. The frame sits at
 * the current sp; restore the blocked-signal mask, the machine context
 * (delivering SIGFPE if restore_sigcontext() reports pending FP
 * exceptions) and the altstack settings, then return via a4 so the
 * interrupted context's a4 value is preserved. Any bad frame kills the
 * task with SIGSEGV.
 */
SYSCALL_DEFINE0(rt_sigreturn)
{
	int sig;
	sigset_t set;
	struct pt_regs *regs;
	struct rt_sigframe __user *frame;

	regs = current_pt_regs();
	frame = (struct rt_sigframe __user *)regs->regs[3];
	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->rs_uctx.uc_sigmask, sizeof(set)))
		goto badframe;

	set_current_blocked(&set);

	/* Positive return = signal (SIGFPE) to raise; negative = bad frame. */
	sig = restore_sigcontext(regs, &frame->rs_uctx.uc_mcontext);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig);

	regs->regs[0] = 0; /* No syscall restarting */
	if (restore_altstack(&frame->rs_uctx.uc_stack))
		goto badframe;

	return regs->regs[4];

badframe:
	force_sig(SIGSEGV);
	return 0;
}
934 
/*
 * Build the rt_sigframe on the user stack (siginfo + ucontext with
 * machine and extended context + saved sigmask) and rewrite @regs so
 * that returning to user space enters the handler:
 *
 *   a0 = signal number, a1 = &siginfo, a2 = &ucontext,
 *   sp = frame, ra = @sig_return (the vDSO sigreturn trampoline),
 *   pc = the handler.
 *
 * Returns 0 or -EFAULT if the frame cannot be written.
 */
static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
			  struct pt_regs *regs, sigset_t *set)
{
	int err = 0;
	struct extctx_layout extctx;
	struct rt_sigframe __user *frame;

	frame = get_sigframe(ksig, regs, &extctx);
	/* Validate the fixed frame plus the extended-context area above it. */
	if (!access_ok(frame, sizeof(*frame) + extctx.size))
		return -EFAULT;

	/* Create siginfo.  */
	err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info);

	/* Create the ucontext.	 */
	err |= __put_user(0, &frame->rs_uctx.uc_flags);
	err |= __put_user(NULL, &frame->rs_uctx.uc_link);
	err |= __save_altstack(&frame->rs_uctx.uc_stack, regs->regs[3]);
	err |= setup_sigcontext(regs, &frame->rs_uctx.uc_mcontext, &extctx);
	err |= __copy_to_user(&frame->rs_uctx.uc_sigmask, set, sizeof(*set));

	if (err)
		return -EFAULT;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = pointer to siginfo
	 *   a2 = pointer to ucontext
	 *
	 * c0_era point to the signal handler, $r3 (sp) points to
	 * the struct rt_sigframe.
	 */
	regs->regs[4] = ksig->sig;
	regs->regs[5] = (unsigned long) &frame->rs_info;
	regs->regs[6] = (unsigned long) &frame->rs_uctx;
	regs->regs[3] = (unsigned long) frame;
	regs->regs[1] = (unsigned long) sig_return;
	regs->csr_era = (unsigned long) ksig->ka.sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->csr_era, regs->regs[1]);

	return 0;
}
982 
/*
 * Deliver one signal: fix up the interrupted syscall's return value per
 * the restart semantics, notify rseq, build the sigframe and report the
 * outcome via signal_setup_done().
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	int ret;
	sigset_t *oldset = sigmask_to_save();
	void *vdso = current->mm->context.vdso;

	/* Are we from a system call? (regs[0] non-zero marks a syscall frame) */
	if (regs->regs[0]) {
		switch (regs->regs[4]) {
		case -ERESTART_RESTARTBLOCK:
		case -ERESTARTNOHAND:
			regs->regs[4] = -EINTR;
			break;
		case -ERESTARTSYS:
			/* Only restart across a handler if SA_RESTART is set. */
			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
				regs->regs[4] = -EINTR;
				break;
			}
			fallthrough;
		case -ERESTARTNOINTR:
			/* Re-issue the syscall: restore a0, back pc up 4 bytes. */
			regs->regs[4] = regs->orig_a0;
			regs->csr_era -= 4;
		}

		regs->regs[0] = 0;	/* Don't deal with this again.	*/
	}

	rseq_signal_deliver(ksig, regs);

	/* ra in the handler points at the vDSO sigreturn trampoline. */
	ret = setup_rt_frame(vdso + current->thread.vdso->offset_sigreturn, ksig, regs, oldset);

	signal_setup_done(ret, ksig, 0);
}
1016 
/*
 * Entry point from the exit-to-user path: deliver a pending signal if
 * there is one; otherwise handle syscall-restart requests and put the
 * saved sigmask back.
 */
void arch_do_signal_or_restart(struct pt_regs *regs)
{
	struct ksignal ksig;

	if (get_signal(&ksig)) {
		/* Whee!  Actually deliver the signal.	*/
		handle_signal(&ksig, regs);
		return;
	}

	/* Are we from a system call? */
	if (regs->regs[0]) {
		switch (regs->regs[4]) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			/* Re-issue the syscall: restore a0, back pc up 4 bytes. */
			regs->regs[4] = regs->orig_a0;
			regs->csr_era -= 4;
			break;

		case -ERESTART_RESTARTBLOCK:
			/* Re-enter via restart_syscall (syscall nr in a7). */
			regs->regs[4] = regs->orig_a0;
			regs->regs[11] = __NR_restart_syscall;
			regs->csr_era -= 4;
			break;
		}
		regs->regs[0] = 0;	/* Don't deal with this again.	*/
	}

	/*
	 * If there's no signal to deliver, we just put the saved sigmask
	 * back
	 */
	restore_saved_sigmask();
}
1052