xref: /linux/arch/loongarch/kernel/signal.c (revision 4ae68b26c3ab5a82aa271e6e9fc9b1a06e1d6b40)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Author: Hanlu Li <lihanlu@loongson.cn>
4  *         Huacai Chen <chenhuacai@loongson.cn>
5  * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
6  *
7  * Derived from MIPS:
8  * Copyright (C) 1991, 1992  Linus Torvalds
9  * Copyright (C) 1994 - 2000  Ralf Baechle
10  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
11  * Copyright (C) 2014, Imagination Technologies Ltd.
12  */
13 #include <linux/audit.h>
14 #include <linux/cache.h>
15 #include <linux/context_tracking.h>
16 #include <linux/irqflags.h>
17 #include <linux/sched.h>
18 #include <linux/mm.h>
19 #include <linux/personality.h>
20 #include <linux/smp.h>
21 #include <linux/kernel.h>
22 #include <linux/signal.h>
23 #include <linux/errno.h>
24 #include <linux/wait.h>
25 #include <linux/ptrace.h>
26 #include <linux/unistd.h>
27 #include <linux/compiler.h>
28 #include <linux/syscalls.h>
29 #include <linux/uaccess.h>
30 
31 #include <asm/asm.h>
32 #include <asm/cacheflush.h>
33 #include <asm/cpu-features.h>
34 #include <asm/fpu.h>
35 #include <asm/ucontext.h>
36 #include <asm/vdso.h>
37 
38 #ifdef DEBUG_SIG
39 #  define DEBUGP(fmt, args...) printk("%s: " fmt, __func__, ##args)
40 #else
41 #  define DEBUGP(fmt, args...)
42 #endif
43 
44 /* Make sure we will not lose FPU ownership */
45 #define lock_fpu_owner()	({ preempt_disable(); pagefault_disable(); })
46 #define unlock_fpu_owner()	({ pagefault_enable(); preempt_enable(); })
47 
48 /* Assembly functions to move context to/from the FPU */
49 extern asmlinkage int
50 _save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
51 extern asmlinkage int
52 _restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
53 extern asmlinkage int
54 _save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
55 extern asmlinkage int
56 _restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
57 extern asmlinkage int
58 _save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
59 extern asmlinkage int
60 _restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
61 
/* The frame pushed onto the user stack when delivering an rt signal. */
struct rt_sigframe {
	struct siginfo rs_info;
	struct ucontext rs_uctx;
};
66 
/* Location and size of one extended-context record in the sigframe. */
struct _ctx_layout {
	struct sctx_info *addr;
	unsigned int size;
};
71 
/* Computed layout of the whole extended-context area of a sigframe. */
struct extctx_layout {
	unsigned long size;		/* total bytes of extended context */
	unsigned int flags;		/* SC_* flags, mirrored into sc_flags */
	struct _ctx_layout fpu;		/* scalar FPU record, if present */
	struct _ctx_layout lsx;		/* 128-bit vector record, if present */
	struct _ctx_layout lasx;	/* 256-bit vector record, if present */
	struct _ctx_layout end;		/* terminating (magic==0) record */
};
80 
81 static void __user *get_ctx_through_ctxinfo(struct sctx_info *info)
82 {
83 	return (void __user *)((char *)info + sizeof(struct sctx_info));
84 }
85 
86 /*
87  * Thread saved context copy to/from a signal context presumed to be on the
88  * user stack, and therefore accessed with appropriate macros from uaccess.h.
89  */
/*
 * Copy the thread-saved FPU state (current->thread.fpu) into the user
 * fpu_context record.  Returns non-zero if any user-space write faulted.
 */
static int copy_fpu_to_sigcontext(struct fpu_context __user *ctx)
{
	int i;
	int err = 0;
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint64_t __user *fcc	= &ctx->fcc;
	uint32_t __user *fcsr	= &ctx->fcsr;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		/* Scalar FP only uses the low 64 bits of each register. */
		err |=
		    __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
			       &regs[i]);
	}
	err |= __put_user(current->thread.fpu.fcc, fcc);
	err |= __put_user(current->thread.fpu.fcsr, fcsr);

	return err;
}
108 
/*
 * Load the thread-saved FPU state (current->thread.fpu) from the user
 * fpu_context record.  Returns non-zero if any user-space read faulted.
 */
static int copy_fpu_from_sigcontext(struct fpu_context __user *ctx)
{
	int i;
	int err = 0;
	u64 fpr_val;
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint64_t __user *fcc	= &ctx->fcc;
	uint32_t __user *fcsr	= &ctx->fcsr;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |= __get_user(fpr_val, &regs[i]);
		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
	}
	err |= __get_user(current->thread.fpu.fcc, fcc);
	err |= __get_user(current->thread.fpu.fcsr, fcsr);

	return err;
}
127 
/*
 * Copy thread-saved LSX state to the user lsx_context record.  Each
 * 128-bit vector register is stored as two consecutive 64-bit words.
 */
static int copy_lsx_to_sigcontext(struct lsx_context __user *ctx)
{
	int i;
	int err = 0;
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint64_t __user *fcc	= &ctx->fcc;
	uint32_t __user *fcsr	= &ctx->fcsr;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
				  &regs[2*i]);
		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
				  &regs[2*i+1]);
	}
	err |= __put_user(current->thread.fpu.fcc, fcc);
	err |= __put_user(current->thread.fpu.fcsr, fcsr);

	return err;
}
147 
/*
 * Load thread-saved LSX state from the user lsx_context record.  Each
 * 128-bit vector register is read as two consecutive 64-bit words.
 */
static int copy_lsx_from_sigcontext(struct lsx_context __user *ctx)
{
	int i;
	int err = 0;
	u64 fpr_val;
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint64_t __user *fcc	= &ctx->fcc;
	uint32_t __user *fcsr	= &ctx->fcsr;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |= __get_user(fpr_val, &regs[2*i]);
		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
		err |= __get_user(fpr_val, &regs[2*i+1]);
		set_fpr64(&current->thread.fpu.fpr[i], 1, fpr_val);
	}
	err |= __get_user(current->thread.fpu.fcc, fcc);
	err |= __get_user(current->thread.fpu.fcsr, fcsr);

	return err;
}
168 
/*
 * Copy thread-saved LASX state to the user lasx_context record.  Each
 * 256-bit vector register is stored as four consecutive 64-bit words.
 */
static int copy_lasx_to_sigcontext(struct lasx_context __user *ctx)
{
	int i;
	int err = 0;
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint64_t __user *fcc	= &ctx->fcc;
	uint32_t __user *fcsr	= &ctx->fcsr;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
				  &regs[4*i]);
		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
				  &regs[4*i+1]);
		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 2),
				  &regs[4*i+2]);
		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 3),
				  &regs[4*i+3]);
	}
	err |= __put_user(current->thread.fpu.fcc, fcc);
	err |= __put_user(current->thread.fpu.fcsr, fcsr);

	return err;
}
192 
/*
 * Load thread-saved LASX state from the user lasx_context record.  Each
 * 256-bit vector register is read as four consecutive 64-bit words.
 */
static int copy_lasx_from_sigcontext(struct lasx_context __user *ctx)
{
	int i;
	int err = 0;
	u64 fpr_val;
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint64_t __user *fcc	= &ctx->fcc;
	uint32_t __user *fcsr	= &ctx->fcsr;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |= __get_user(fpr_val, &regs[4*i]);
		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
		err |= __get_user(fpr_val, &regs[4*i+1]);
		set_fpr64(&current->thread.fpu.fpr[i], 1, fpr_val);
		err |= __get_user(fpr_val, &regs[4*i+2]);
		set_fpr64(&current->thread.fpu.fpr[i], 2, fpr_val);
		err |= __get_user(fpr_val, &regs[4*i+3]);
		set_fpr64(&current->thread.fpu.fpr[i], 3, fpr_val);
	}
	err |= __get_user(current->thread.fpu.fcc, fcc);
	err |= __get_user(current->thread.fpu.fcsr, fcsr);

	return err;
}
217 
218 /*
219  * Wrappers for the assembly _{save,restore}_fp_context functions.
220  */
221 static int save_hw_fpu_context(struct fpu_context __user *ctx)
222 {
223 	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
224 	uint64_t __user *fcc	= &ctx->fcc;
225 	uint32_t __user *fcsr	= &ctx->fcsr;
226 
227 	return _save_fp_context(regs, fcc, fcsr);
228 }
229 
230 static int restore_hw_fpu_context(struct fpu_context __user *ctx)
231 {
232 	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
233 	uint64_t __user *fcc	= &ctx->fcc;
234 	uint32_t __user *fcsr	= &ctx->fcsr;
235 
236 	return _restore_fp_context(regs, fcc, fcsr);
237 }
238 
239 static int save_hw_lsx_context(struct lsx_context __user *ctx)
240 {
241 	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
242 	uint64_t __user *fcc	= &ctx->fcc;
243 	uint32_t __user *fcsr	= &ctx->fcsr;
244 
245 	return _save_lsx_context(regs, fcc, fcsr);
246 }
247 
248 static int restore_hw_lsx_context(struct lsx_context __user *ctx)
249 {
250 	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
251 	uint64_t __user *fcc	= &ctx->fcc;
252 	uint32_t __user *fcsr	= &ctx->fcsr;
253 
254 	return _restore_lsx_context(regs, fcc, fcsr);
255 }
256 
257 static int save_hw_lasx_context(struct lasx_context __user *ctx)
258 {
259 	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
260 	uint64_t __user *fcc	= &ctx->fcc;
261 	uint32_t __user *fcsr	= &ctx->fcsr;
262 
263 	return _save_lasx_context(regs, fcc, fcsr);
264 }
265 
266 static int restore_hw_lasx_context(struct lasx_context __user *ctx)
267 {
268 	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
269 	uint64_t __user *fcc	= &ctx->fcc;
270 	uint32_t __user *fcsr	= &ctx->fcsr;
271 
272 	return _restore_lasx_context(regs, fcc, fcsr);
273 }
274 
/*
 * Inspect the user-supplied FCSR for exception cause bits whose matching
 * enable bits are set.  If any are pending, clear them in the user copy
 * and report SIGFPE.  Returns a negative error on a faulting user access,
 * SIGFPE if an enabled exception was pending, 0 otherwise.
 */
static int fcsr_pending(unsigned int __user *fcsr)
{
	int err, sig = 0;
	unsigned int csr, enabled;

	/* Cause bits sit 24 bits above the corresponding enable bits. */
	err = __get_user(csr, fcsr);
	enabled = ((csr & FPU_CSR_ALL_E) << 24);
	/*
	 * If the signal handler set some FPU exceptions, clear it and
	 * send SIGFPE.
	 */
	if (csr & enabled) {
		csr &= ~enabled;
		err |= __put_user(csr, fcsr);
		sig = SIGFPE;
	}
	return err ?: sig;
}
293 
294 /*
295  * Helper routines
296  */
/*
 * Helper routines
 */
/*
 * Save the scalar FPU context into the user sigframe.  User writes are
 * done with pagefaults disabled; if they fault, the destination lines
 * are touched (faulted in) outside the lock and the save is retried.
 * Returns non-zero only on an unrecoverable user-access fault.
 */
static int protected_save_fpu_context(struct extctx_layout *extctx)
{
	int err = 0;
	struct sctx_info __user *info = extctx->fpu.addr;
	struct fpu_context __user *fpu_ctx = (struct fpu_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs	= (uint64_t *)&fpu_ctx->regs;
	uint64_t __user *fcc	= &fpu_ctx->fcc;
	uint32_t __user *fcsr	= &fpu_ctx->fcsr;

	while (1) {
		lock_fpu_owner();
		if (is_fpu_owner())
			/* Registers live in hardware: dump them directly. */
			err = save_hw_fpu_context(fpu_ctx);
		else
			/* State already saved in the thread struct. */
			err = copy_fpu_to_sigcontext(fpu_ctx);
		unlock_fpu_owner();

		err |= __put_user(FPU_CTX_MAGIC, &info->magic);
		err |= __put_user(extctx->fpu.size, &info->size);

		if (likely(!err))
			break;
		/* Touch the FPU context and try again */
		err = __put_user(0, &regs[0]) |
			__put_user(0, &regs[31]) |
			__put_user(0, fcc) |
			__put_user(0, fcsr);
		if (err)
			return err;	/* really bad sigcontext */
	}

	return err;
}
330 
/*
 * Restore the scalar FPU context from the user sigframe, faulting the
 * source pages in and retrying if a pagefault-disabled access fails.
 * Returns a negative error on fault, or a signal number (SIGFPE) if
 * the handler left enabled FCSR exception bits pending, 0 otherwise.
 */
static int protected_restore_fpu_context(struct extctx_layout *extctx)
{
	int err = 0, sig = 0, tmp __maybe_unused;
	struct sctx_info __user *info = extctx->fpu.addr;
	struct fpu_context __user *fpu_ctx = (struct fpu_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs	= (uint64_t *)&fpu_ctx->regs;
	uint64_t __user *fcc	= &fpu_ctx->fcc;
	uint32_t __user *fcsr	= &fpu_ctx->fcsr;

	err = sig = fcsr_pending(fcsr);
	if (err < 0)
		return err;

	while (1) {
		lock_fpu_owner();
		if (is_fpu_owner())
			err = restore_hw_fpu_context(fpu_ctx);
		else
			err = copy_fpu_from_sigcontext(fpu_ctx);
		unlock_fpu_owner();

		if (likely(!err))
			break;
		/* Touch the FPU context and try again */
		err = __get_user(tmp, &regs[0]) |
			__get_user(tmp, &regs[31]) |
			__get_user(tmp, fcc) |
			__get_user(tmp, fcsr);
		if (err)
			break;	/* really bad sigcontext */
	}

	return err ?: sig;
}
365 
/*
 * Save the LSX (128-bit vector) context into the user sigframe, with
 * the same fault-in-and-retry strategy as protected_save_fpu_context().
 */
static int protected_save_lsx_context(struct extctx_layout *extctx)
{
	int err = 0;
	struct sctx_info __user *info = extctx->lsx.addr;
	struct lsx_context __user *lsx_ctx = (struct lsx_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs	= (uint64_t *)&lsx_ctx->regs;
	uint64_t __user *fcc	= &lsx_ctx->fcc;
	uint32_t __user *fcsr	= &lsx_ctx->fcsr;

	while (1) {
		lock_fpu_owner();
		if (is_lsx_enabled())
			err = save_hw_lsx_context(lsx_ctx);
		else {
			/* Flush live scalar state so the thread struct is current. */
			if (is_fpu_owner())
				save_fp(current);
			err = copy_lsx_to_sigcontext(lsx_ctx);
		}
		unlock_fpu_owner();

		err |= __put_user(LSX_CTX_MAGIC, &info->magic);
		err |= __put_user(extctx->lsx.size, &info->size);

		if (likely(!err))
			break;
		/* Touch the LSX context and try again */
		err = __put_user(0, &regs[0]) |
			__put_user(0, &regs[32*2-1]) |
			__put_user(0, fcc) |
			__put_user(0, fcsr);
		if (err)
			return err;	/* really bad sigcontext */
	}

	return err;
}
402 
/*
 * Restore the LSX (128-bit vector) context from the user sigframe,
 * mirroring protected_restore_fpu_context()'s retry strategy.
 * Returns a negative error, SIGFPE if FCSR exceptions were pending,
 * or 0 on success.
 */
static int protected_restore_lsx_context(struct extctx_layout *extctx)
{
	int err = 0, sig = 0, tmp __maybe_unused;
	struct sctx_info __user *info = extctx->lsx.addr;
	struct lsx_context __user *lsx_ctx = (struct lsx_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs	= (uint64_t *)&lsx_ctx->regs;
	uint64_t __user *fcc	= &lsx_ctx->fcc;
	uint32_t __user *fcsr	= &lsx_ctx->fcsr;

	err = sig = fcsr_pending(fcsr);
	if (err < 0)
		return err;

	while (1) {
		lock_fpu_owner();
		if (is_lsx_enabled())
			err = restore_hw_lsx_context(lsx_ctx);
		else {
			err = copy_lsx_from_sigcontext(lsx_ctx);
			/* Reload the live scalar registers from the thread struct. */
			if (is_fpu_owner())
				restore_fp(current);
		}
		unlock_fpu_owner();

		if (likely(!err))
			break;
		/* Touch the LSX context and try again */
		err = __get_user(tmp, &regs[0]) |
			__get_user(tmp, &regs[32*2-1]) |
			__get_user(tmp, fcc) |
			__get_user(tmp, fcsr);
		if (err)
			break;	/* really bad sigcontext */
	}

	return err ?: sig;
}
440 
/*
 * Save the LASX (256-bit vector) context into the user sigframe, with
 * the same fault-in-and-retry strategy as protected_save_fpu_context().
 */
static int protected_save_lasx_context(struct extctx_layout *extctx)
{
	int err = 0;
	struct sctx_info __user *info = extctx->lasx.addr;
	struct lasx_context __user *lasx_ctx =
		(struct lasx_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs	= (uint64_t *)&lasx_ctx->regs;
	uint64_t __user *fcc	= &lasx_ctx->fcc;
	uint32_t __user *fcsr	= &lasx_ctx->fcsr;

	while (1) {
		lock_fpu_owner();
		if (is_lasx_enabled())
			err = save_hw_lasx_context(lasx_ctx);
		else {
			/* Flush whatever narrower state is live in hardware. */
			if (is_lsx_enabled())
				save_lsx(current);
			else if (is_fpu_owner())
				save_fp(current);
			err = copy_lasx_to_sigcontext(lasx_ctx);
		}
		unlock_fpu_owner();

		err |= __put_user(LASX_CTX_MAGIC, &info->magic);
		err |= __put_user(extctx->lasx.size, &info->size);

		if (likely(!err))
			break;
		/* Touch the LASX context and try again */
		err = __put_user(0, &regs[0]) |
			__put_user(0, &regs[32*4-1]) |
			__put_user(0, fcc) |
			__put_user(0, fcsr);
		if (err)
			return err;	/* really bad sigcontext */
	}

	return err;
}
480 
/*
 * Restore the LASX (256-bit vector) context from the user sigframe,
 * mirroring protected_restore_fpu_context()'s retry strategy.
 * Returns a negative error, SIGFPE if FCSR exceptions were pending,
 * or 0 on success.
 */
static int protected_restore_lasx_context(struct extctx_layout *extctx)
{
	int err = 0, sig = 0, tmp __maybe_unused;
	struct sctx_info __user *info = extctx->lasx.addr;
	struct lasx_context __user *lasx_ctx =
		(struct lasx_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs	= (uint64_t *)&lasx_ctx->regs;
	uint64_t __user *fcc	= &lasx_ctx->fcc;
	uint32_t __user *fcsr	= &lasx_ctx->fcsr;

	err = sig = fcsr_pending(fcsr);
	if (err < 0)
		return err;

	while (1) {
		lock_fpu_owner();
		if (is_lasx_enabled())
			err = restore_hw_lasx_context(lasx_ctx);
		else {
			err = copy_lasx_from_sigcontext(lasx_ctx);
			/* Reload whatever narrower state is live in hardware. */
			if (is_lsx_enabled())
				restore_lsx(current);
			else if (is_fpu_owner())
				restore_fp(current);
		}
		unlock_fpu_owner();

		if (likely(!err))
			break;
		/* Touch the LASX context and try again */
		err = __get_user(tmp, &regs[0]) |
			__get_user(tmp, &regs[32*4-1]) |
			__get_user(tmp, fcc) |
			__get_user(tmp, fcsr);
		if (err)
			break;	/* really bad sigcontext */
	}

	return err ?: sig;
}
521 
/*
 * Write the base sigcontext (pc, flags, GPRs) and at most one extended
 * vector/FPU record to the user sigframe, then terminate the extended-
 * context chain with a null record.  Returns non-zero on fault.
 */
static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
			    struct extctx_layout *extctx)
{
	int i, err = 0;
	struct sctx_info __user *info;

	err |= __put_user(regs->csr_era, &sc->sc_pc);
	err |= __put_user(extctx->flags, &sc->sc_flags);

	/* $r0 is hardwired zero; store 0 rather than pt_regs content. */
	err |= __put_user(0, &sc->sc_regs[0]);
	for (i = 1; i < 32; i++)
		err |= __put_user(regs->regs[i], &sc->sc_regs[i]);

	/* Only the widest live vector unit is saved (LASX ⊃ LSX ⊃ FPU). */
	if (extctx->lasx.addr)
		err |= protected_save_lasx_context(extctx);
	else if (extctx->lsx.addr)
		err |= protected_save_lsx_context(extctx);
	else if (extctx->fpu.addr)
		err |= protected_save_fpu_context(extctx);

	/* Set the "end" magic */
	info = (struct sctx_info *)extctx->end.addr;
	err |= __put_user(0, &info->magic);
	err |= __put_user(0, &info->size);

	return err;
}
549 
/*
 * Walk the chain of sctx_info records following the sigcontext and
 * record the address of each recognized extended-context type in
 * @extctx.  The chain ends at a record with magic == 0.  Returns
 * -EINVAL for an unknown magic or an undersized record, a negative
 * error on a faulting user read, 0 on success.
 */
static int parse_extcontext(struct sigcontext __user *sc, struct extctx_layout *extctx)
{
	int err = 0;
	unsigned int magic, size;
	struct sctx_info __user *info = (struct sctx_info __user *)&sc->sc_extcontext;

	while(1) {
		err |= __get_user(magic, &info->magic);
		err |= __get_user(size, &info->size);
		if (err)
			return err;

		switch (magic) {
		case 0: /* END */
			goto done;

		case FPU_CTX_MAGIC:
			/* Record must hold at least header + payload. */
			if (size < (sizeof(struct sctx_info) +
				    sizeof(struct fpu_context)))
				goto invalid;
			extctx->fpu.addr = info;
			break;

		case LSX_CTX_MAGIC:
			if (size < (sizeof(struct sctx_info) +
				    sizeof(struct lsx_context)))
				goto invalid;
			extctx->lsx.addr = info;
			break;

		case LASX_CTX_MAGIC:
			if (size < (sizeof(struct sctx_info) +
				    sizeof(struct lasx_context)))
				goto invalid;
			extctx->lasx.addr = info;
			break;

		default:
			goto invalid;
		}

		/* size includes the header, so this lands on the next record. */
		info = (struct sctx_info *)((char *)info + size);
	}

done:
	return 0;

invalid:
	return -EINVAL;
}
600 
/*
 * Restore CPU state from a user sigcontext during sigreturn: flags,
 * GPRs, pc, and whichever extended vector/FPU record is present.
 * Returns 0 on success, SIGFPE if the handler left FP exceptions
 * pending, or a negative error on a bad/faulting sigcontext.
 */
static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	int i, err = 0;
	struct extctx_layout extctx;

	memset(&extctx, 0, sizeof(struct extctx_layout));

	err = __get_user(extctx.flags, &sc->sc_flags);
	if (err)
		goto bad;

	err = parse_extcontext(sc, &extctx);
	if (err)
		goto bad;

	conditional_used_math(extctx.flags & SC_USED_FP);

	/*
	 * The signal handler may have used FPU; give it up if the program
	 * doesn't want it following sigreturn.
	 */
	if (!(extctx.flags & SC_USED_FP))
		lose_fpu(0);

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/* $r0 is hardwired zero, so restore only r1..r31. */
	err |= __get_user(regs->csr_era, &sc->sc_pc);
	for (i = 1; i < 32; i++)
		err |= __get_user(regs->regs[i], &sc->sc_regs[i]);

	/* Restore only the widest record present, matching setup_sigcontext(). */
	if (extctx.lasx.addr)
		err |= protected_restore_lasx_context(&extctx);
	else if (extctx.lsx.addr)
		err |= protected_restore_lsx_context(&extctx);
	else if (extctx.fpu.addr)
		err |= protected_restore_fpu_context(&extctx);

bad:
	return err;
}
642 
643 static unsigned int handle_flags(void)
644 {
645 	unsigned int flags = 0;
646 
647 	flags = used_math() ? SC_USED_FP : 0;
648 
649 	switch (current->thread.error_code) {
650 	case 1:
651 		flags |= SC_ADDRERR_RD;
652 		break;
653 	case 2:
654 		flags |= SC_ADDRERR_WR;
655 		break;
656 	}
657 
658 	return flags;
659 }
660 
661 static unsigned long extframe_alloc(struct extctx_layout *extctx,
662 				    struct _ctx_layout *layout,
663 				    size_t size, unsigned int align, unsigned long base)
664 {
665 	unsigned long new_base = base - size;
666 
667 	new_base = round_down(new_base, (align < 16 ? 16 : align));
668 	new_base -= sizeof(struct sctx_info);
669 
670 	layout->addr = (void *)new_base;
671 	layout->size = (unsigned int)(base - new_base);
672 	extctx->size += layout->size;
673 
674 	return new_base;
675 }
676 
/*
 * Plan the extended-context layout below @sp.  The area grows down:
 * the terminating null record is reserved first (highest address),
 * then at most one vector/FPU record — the widest unit with live
 * context wins.  Returns the new (lower) stack pointer.
 */
static unsigned long setup_extcontext(struct extctx_layout *extctx, unsigned long sp)
{
	unsigned long new_sp = sp;

	memset(extctx, 0, sizeof(struct extctx_layout));

	extctx->flags = handle_flags();

	/* Grow down, alloc "end" context info first. */
	new_sp -= sizeof(struct sctx_info);
	extctx->end.addr = (void *)new_sp;
	extctx->end.size = (unsigned int)sizeof(struct sctx_info);
	extctx->size += extctx->end.size;

	if (extctx->flags & SC_USED_FP) {
		if (cpu_has_lasx && thread_lasx_context_live())
			new_sp = extframe_alloc(extctx, &extctx->lasx,
			  sizeof(struct lasx_context), LASX_CTX_ALIGN, new_sp);
		else if (cpu_has_lsx && thread_lsx_context_live())
			new_sp = extframe_alloc(extctx, &extctx->lsx,
			  sizeof(struct lsx_context), LSX_CTX_ALIGN, new_sp);
		else if (cpu_has_fpu)
			new_sp = extframe_alloc(extctx, &extctx->fpu,
			  sizeof(struct fpu_context), FPU_CTX_ALIGN, new_sp);
	}

	return new_sp;
}
705 
/*
 * Pick the user stack address for a new rt_sigframe, honouring any
 * alternate signal stack, and lay out the extended context above it
 * via setup_extcontext().  Returns (void *)-1 to force SIGSEGV when
 * the alternate stack would overflow.
 */
void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
			  struct extctx_layout *extctx)
{
	unsigned long sp;

	/* Default to using normal stack */
	sp = regs->regs[3];

	/*
	 * If we are on the alternate signal stack and would overflow it, don't.
	 * Return an always-bogus address instead so we will die with SIGSEGV.
	 */
	if (on_sig_stack(sp) &&
	    !likely(on_sig_stack(sp - sizeof(struct rt_sigframe))))
		return (void __user __force *)(-1UL);

	sp = sigsp(sp, ksig);
	sp = round_down(sp, 16);
	sp = setup_extcontext(extctx, sp);
	sp -= sizeof(struct rt_sigframe);

	/* sizeof(rt_sigframe) and all extctx records keep 16-byte alignment. */
	if (!IS_ALIGNED(sp, 16))
		BUG();

	return (void __user *)sp;
}
732 
733 /*
734  * Atomically swap in the new signal mask, and wait for a signal.
735  */
736 
/*
 * rt_sigreturn system call: unwind the sigframe that handle_signal()
 * pushed at the current stack pointer — restore the blocked-signal
 * mask, the saved register/FP context, and the altstack settings.
 * Any malformed frame raises SIGSEGV.  Returns the restored a0 so the
 * interrupted computation resumes with its original value.
 */
asmlinkage long sys_rt_sigreturn(void)
{
	int sig;
	sigset_t set;
	struct pt_regs *regs;
	struct rt_sigframe __user *frame;

	regs = current_pt_regs();
	/* The sigframe sits exactly at the handler's stack pointer ($r3). */
	frame = (struct rt_sigframe __user *)regs->regs[3];
	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->rs_uctx.uc_sigmask, sizeof(set)))
		goto badframe;

	set_current_blocked(&set);

	/* restore_sigcontext() may also report a deferred SIGFPE. */
	sig = restore_sigcontext(regs, &frame->rs_uctx.uc_mcontext);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig);

	regs->regs[0] = 0; /* No syscall restarting */
	if (restore_altstack(&frame->rs_uctx.uc_stack))
		goto badframe;

	return regs->regs[4];

badframe:
	force_sig(SIGSEGV);
	return 0;
}
769 
/*
 * Build the rt_sigframe (siginfo + ucontext) on the user stack and
 * redirect @regs so the next return to user space enters the handler.
 * @sig_return is the VDSO sigreturn trampoline placed in $ra.
 * Returns 0 on success, -EFAULT on any user-access failure.
 */
static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
			  struct pt_regs *regs, sigset_t *set)
{
	int err = 0;
	struct extctx_layout extctx;
	struct rt_sigframe __user *frame;

	frame = get_sigframe(ksig, regs, &extctx);
	/* extctx records live above the frame, so check the whole span. */
	if (!access_ok(frame, sizeof(*frame) + extctx.size))
		return -EFAULT;

	/* Create siginfo.  */
	err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info);

	/* Create the ucontext.	 */
	err |= __put_user(0, &frame->rs_uctx.uc_flags);
	err |= __put_user(NULL, &frame->rs_uctx.uc_link);
	err |= __save_altstack(&frame->rs_uctx.uc_stack, regs->regs[3]);
	err |= setup_sigcontext(regs, &frame->rs_uctx.uc_mcontext, &extctx);
	err |= __copy_to_user(&frame->rs_uctx.uc_sigmask, set, sizeof(*set));

	if (err)
		return -EFAULT;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = pointer to siginfo
	 *   a2 = pointer to ucontext
	 *
	 * c0_era point to the signal handler, $r3 (sp) points to
	 * the struct rt_sigframe.
	 */
	regs->regs[4] = ksig->sig;
	regs->regs[5] = (unsigned long) &frame->rs_info;
	regs->regs[6] = (unsigned long) &frame->rs_uctx;
	regs->regs[3] = (unsigned long) frame;
	regs->regs[1] = (unsigned long) sig_return;
	regs->csr_era = (unsigned long) ksig->ka.sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->csr_era, regs->regs[1]);

	return 0;
}
817 
/*
 * Deliver one signal: resolve syscall-restart semantics for an
 * interrupted system call, notify rseq, and push the sigframe.
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	int ret;
	sigset_t *oldset = sigmask_to_save();
	void *vdso = current->mm->context.vdso;

	/* Are we from a system call? */
	if (regs->regs[0]) {
		switch (regs->regs[4]) {
		case -ERESTART_RESTARTBLOCK:
		case -ERESTARTNOHAND:
			/* These never restart across a handler. */
			regs->regs[4] = -EINTR;
			break;
		case -ERESTARTSYS:
			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
				regs->regs[4] = -EINTR;
				break;
			}
			fallthrough;
		case -ERESTARTNOINTR:
			/* Re-issue the syscall: restore a0, back era up one insn. */
			regs->regs[4] = regs->orig_a0;
			regs->csr_era -= 4;
		}

		regs->regs[0] = 0;	/* Don't deal with this again.	*/
	}

	rseq_signal_deliver(ksig, regs);

	/* $ra is pointed at the VDSO sigreturn trampoline. */
	ret = setup_rt_frame(vdso + current->thread.vdso->offset_sigreturn, ksig, regs, oldset);

	signal_setup_done(ret, ksig, 0);
}
851 
/*
 * Entry point from the exit-to-user path: deliver a pending signal if
 * there is one, otherwise handle syscall restarting and put the saved
 * signal mask back.
 */
void arch_do_signal_or_restart(struct pt_regs *regs)
{
	struct ksignal ksig;

	if (get_signal(&ksig)) {
		/* Whee!  Actually deliver the signal.	*/
		handle_signal(&ksig, regs);
		return;
	}

	/* Are we from a system call? */
	if (regs->regs[0]) {
		switch (regs->regs[4]) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			/* No handler ran: restore a0 and re-execute the syscall. */
			regs->regs[4] = regs->orig_a0;
			regs->csr_era -= 4;
			break;

		case -ERESTART_RESTARTBLOCK:
			/* Restart via restart_syscall (syscall nr lives in $r11/a7). */
			regs->regs[4] = regs->orig_a0;
			regs->regs[11] = __NR_restart_syscall;
			regs->csr_era -= 4;
			break;
		}
		regs->regs[0] = 0;	/* Don't deal with this again.	*/
	}

	/*
	 * If there's no signal to deliver, we just put the saved sigmask
	 * back
	 */
	restore_saved_sigmask();
}
887