// SPDX-License-Identifier: GPL-2.0+
/*
 * Author: Hanlu Li <lihanlu@loongson.cn>
 *         Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 1994 - 2000  Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#include <linux/audit.h>
#include <linux/cache.h>
#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/irqflags.h>
#include <linux/rseq.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/compiler.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>

#include <asm/asm.h>
#include <asm/cacheflush.h>
#include <asm/cpu-features.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>

#ifdef DEBUG_SIG
#  define DEBUGP(fmt, args...) printk("%s: " fmt, __func__, ##args)
#else
#  define DEBUGP(fmt, args...)
#endif

/* Make sure we will not lose FPU ownership */
#define lock_fpu_owner()	({ preempt_disable(); pagefault_disable(); })
#define unlock_fpu_owner()	({ pagefault_enable(); preempt_enable(); })
/* Make sure we will not lose LBT ownership */
#define lock_lbt_owner()	({ preempt_disable(); pagefault_disable(); })
#define unlock_lbt_owner()	({ pagefault_enable(); preempt_enable(); })
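
/*
 * Why both calls: disabling preemption keeps this CPU's FPU/LBT
 * ownership stable while register state is copied, and disabling page
 * faults makes the user accesses inside the critical section fail fast
 * instead of sleeping; the protected_*() helpers below then fault the
 * page in outside the critical section and retry.
 */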

/* Assembly functions to move context to/from the FPU */
extern asmlinkage int
_save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
extern asmlinkage int
_restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
extern asmlinkage int
_save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
extern asmlinkage int
_restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
extern asmlinkage int
_save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
extern asmlinkage int
_restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);

#ifdef CONFIG_CPU_HAS_LBT
extern asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags);
extern asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags);
extern asmlinkage int _save_ftop_context(void __user *ftop);
extern asmlinkage int _restore_ftop_context(void __user *ftop);
#endif

struct rt_sigframe {
	struct siginfo rs_info;
	struct ucontext rs_uctx;
};

struct _ctx_layout {
	struct sctx_info *addr;
	unsigned int size;
};

struct extctx_layout {
	unsigned long size;
	unsigned int flags;
	struct _ctx_layout fpu;
	struct _ctx_layout lsx;
	struct _ctx_layout lasx;
	struct _ctx_layout lbt;
	struct _ctx_layout end;
};

static void __user *get_ctx_through_ctxinfo(struct sctx_info *info)
{
	return (void __user *)((char *)info + sizeof(struct sctx_info));
}

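/*
 * How the extended context is laid out on the user stack (a reading aid;
 * the authoritative layout is what setup_extcontext() builds and
 * parse_extcontext() walks below):
 *
 *	higher addresses
 *	+----------------------------+
 *	| struct sctx_info (END)     |  magic == 0 terminates the walk
 *	+----------------------------+
 *	| FPU/LSX/LASX payload       |
 *	| struct sctx_info (header)  |  magic/size identify the record
 *	+----------------------------+
 *	| LBT payload (if present)   |
 *	| struct sctx_info (header)  |
 *	+----------------------------+
 *	| struct rt_sigframe         |  <- user sp on handler entry
 *	+----------------------------+
 *	lower addresses
 *
 * get_ctx_through_ctxinfo() simply skips a record's sctx_info header to
 * reach its payload.
 */
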
/*
 * Copy thread state to/from a signal context presumed to be on the
 * user stack, and therefore accessed with appropriate macros from uaccess.h.
 */
static int copy_fpu_to_sigcontext(struct fpu_context __user *ctx)
{
	int i;
	int err = 0;
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint64_t __user *fcc	= &ctx->fcc;
	uint32_t __user *fcsr	= &ctx->fcsr;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
				  &regs[i]);
	}
	err |= __put_user(current->thread.fpu.fcc, fcc);
	err |= __put_user(current->thread.fpu.fcsr, fcsr);

	return err;
}

static int copy_fpu_from_sigcontext(struct fpu_context __user *ctx)
{
	int i;
	int err = 0;
	u64 fpr_val;
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint64_t __user *fcc	= &ctx->fcc;
	uint32_t __user *fcsr	= &ctx->fcsr;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |= __get_user(fpr_val, &regs[i]);
		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
	}
	err |= __get_user(current->thread.fpu.fcc, fcc);
	err |= __get_user(current->thread.fpu.fcsr, fcsr);

	return err;
}

static int copy_lsx_to_sigcontext(struct lsx_context __user *ctx)
{
	int i;
	int err = 0;
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint64_t __user *fcc	= &ctx->fcc;
	uint32_t __user *fcsr	= &ctx->fcsr;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
				  &regs[2*i]);
		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
				  &regs[2*i+1]);
	}
	err |= __put_user(current->thread.fpu.fcc, fcc);
	err |= __put_user(current->thread.fpu.fcsr, fcsr);

	return err;
}

static int copy_lsx_from_sigcontext(struct lsx_context __user *ctx)
{
	int i;
	int err = 0;
	u64 fpr_val;
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint64_t __user *fcc	= &ctx->fcc;
	uint32_t __user *fcsr	= &ctx->fcsr;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |= __get_user(fpr_val, &regs[2*i]);
		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
		err |= __get_user(fpr_val, &regs[2*i+1]);
		set_fpr64(&current->thread.fpu.fpr[i], 1, fpr_val);
	}
	err |= __get_user(current->thread.fpu.fcc, fcc);
	err |= __get_user(current->thread.fpu.fcsr, fcsr);

	return err;
}

static int copy_lasx_to_sigcontext(struct lasx_context __user *ctx)
{
	int i;
	int err = 0;
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint64_t __user *fcc	= &ctx->fcc;
	uint32_t __user *fcsr	= &ctx->fcsr;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
				  &regs[4*i]);
		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
				  &regs[4*i+1]);
		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 2),
				  &regs[4*i+2]);
		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 3),
				  &regs[4*i+3]);
	}
	err |= __put_user(current->thread.fpu.fcc, fcc);
	err |= __put_user(current->thread.fpu.fcsr, fcsr);

	return err;
}

static int copy_lasx_from_sigcontext(struct lasx_context __user *ctx)
{
	int i;
	int err = 0;
	u64 fpr_val;
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint64_t __user *fcc	= &ctx->fcc;
	uint32_t __user *fcsr	= &ctx->fcsr;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |= __get_user(fpr_val, &regs[4*i]);
		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
		err |= __get_user(fpr_val, &regs[4*i+1]);
		set_fpr64(&current->thread.fpu.fpr[i], 1, fpr_val);
		err |= __get_user(fpr_val, &regs[4*i+2]);
		set_fpr64(&current->thread.fpu.fpr[i], 2, fpr_val);
		err |= __get_user(fpr_val, &regs[4*i+3]);
		set_fpr64(&current->thread.fpu.fpr[i], 3, fpr_val);
	}
	err |= __get_user(current->thread.fpu.fcc, fcc);
	err |= __get_user(current->thread.fpu.fcsr, fcsr);

	return err;
}

#ifdef CONFIG_CPU_HAS_LBT
static int copy_lbt_to_sigcontext(struct lbt_context __user *ctx)
{
	int err = 0;
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint32_t __user *eflags	= (uint32_t *)&ctx->eflags;

	err |= __put_user(current->thread.lbt.scr0, &regs[0]);
	err |= __put_user(current->thread.lbt.scr1, &regs[1]);
	err |= __put_user(current->thread.lbt.scr2, &regs[2]);
	err |= __put_user(current->thread.lbt.scr3, &regs[3]);
	err |= __put_user(current->thread.lbt.eflags, eflags);

	return err;
}

static int copy_lbt_from_sigcontext(struct lbt_context __user *ctx)
{
	int err = 0;
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint32_t __user *eflags	= (uint32_t *)&ctx->eflags;

	err |= __get_user(current->thread.lbt.scr0, &regs[0]);
	err |= __get_user(current->thread.lbt.scr1, &regs[1]);
	err |= __get_user(current->thread.lbt.scr2, &regs[2]);
	err |= __get_user(current->thread.lbt.scr3, &regs[3]);
	err |= __get_user(current->thread.lbt.eflags, eflags);

	return err;
}

static int copy_ftop_to_sigcontext(struct lbt_context __user *ctx)
{
	uint32_t  __user *ftop	= &ctx->ftop;

	return __put_user(current->thread.fpu.ftop, ftop);
}

static int copy_ftop_from_sigcontext(struct lbt_context __user *ctx)
{
	uint32_t  __user *ftop	= &ctx->ftop;

	return __get_user(current->thread.fpu.ftop, ftop);
}
#endif
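
/*
 * Note: ftop lives in current->thread.fpu rather than current->thread.lbt,
 * which is why the ftop helpers pair with FPU ownership in the
 * protected_*_lbt_context() functions below; it is apparently the
 * x87-style stack-top state that LBT (Loongson Binary Translation)
 * carries alongside the scratch registers and eflags.
 */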

/*
 * Wrappers for the assembly _{save,restore}_fp_context functions.
 */
static int save_hw_fpu_context(struct fpu_context __user *ctx)
{
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint64_t __user *fcc	= &ctx->fcc;
	uint32_t __user *fcsr	= &ctx->fcsr;

	return _save_fp_context(regs, fcc, fcsr);
}

static int restore_hw_fpu_context(struct fpu_context __user *ctx)
{
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint64_t __user *fcc	= &ctx->fcc;
	uint32_t __user *fcsr	= &ctx->fcsr;

	return _restore_fp_context(regs, fcc, fcsr);
}

static int save_hw_lsx_context(struct lsx_context __user *ctx)
{
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint64_t __user *fcc	= &ctx->fcc;
	uint32_t __user *fcsr	= &ctx->fcsr;

	return _save_lsx_context(regs, fcc, fcsr);
}

static int restore_hw_lsx_context(struct lsx_context __user *ctx)
{
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint64_t __user *fcc	= &ctx->fcc;
	uint32_t __user *fcsr	= &ctx->fcsr;

	return _restore_lsx_context(regs, fcc, fcsr);
}

static int save_hw_lasx_context(struct lasx_context __user *ctx)
{
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint64_t __user *fcc	= &ctx->fcc;
	uint32_t __user *fcsr	= &ctx->fcsr;

	return _save_lasx_context(regs, fcc, fcsr);
}

static int restore_hw_lasx_context(struct lasx_context __user *ctx)
{
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint64_t __user *fcc	= &ctx->fcc;
	uint32_t __user *fcsr	= &ctx->fcsr;

	return _restore_lasx_context(regs, fcc, fcsr);
}

/*
 * Wrappers for the assembly _{save,restore}_lbt_context functions.
 */
#ifdef CONFIG_CPU_HAS_LBT
static int save_hw_lbt_context(struct lbt_context __user *ctx)
{
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint32_t __user *eflags	= (uint32_t *)&ctx->eflags;

	return _save_lbt_context(regs, eflags);
}

static int restore_hw_lbt_context(struct lbt_context __user *ctx)
{
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint32_t __user *eflags	= (uint32_t *)&ctx->eflags;

	return _restore_lbt_context(regs, eflags);
}

static int save_hw_ftop_context(struct lbt_context __user *ctx)
{
	uint32_t __user *ftop	= &ctx->ftop;

	return _save_ftop_context(ftop);
}

static int restore_hw_ftop_context(struct lbt_context __user *ctx)
{
	uint32_t __user *ftop	= &ctx->ftop;

	return _restore_ftop_context(ftop);
}
#endif

static int fcsr_pending(unsigned int __user *fcsr)
{
	int err, sig = 0;
	unsigned int csr, enabled;

	err = __get_user(csr, fcsr);
	enabled = ((csr & FPU_CSR_ALL_E) << 24);
	/*
	 * If the signal handler set some FPU exceptions, clear them and
	 * send SIGFPE.
	 */
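	/*
	 * The shift above relies on the FCSR layout, where each cause bit
	 * sits 24 bits above its matching enable bit, so (csr & enabled)
	 * is non-zero exactly when an enabled exception's cause bit is set.
	 */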
	if (csr & enabled) {
		csr &= ~enabled;
		err |= __put_user(csr, fcsr);
		sig = SIGFPE;
	}
	return err ?: sig;
}

/*
 * Helper routines
 */
static int protected_save_fpu_context(struct extctx_layout *extctx)
{
	int err = 0;
	struct sctx_info __user *info = extctx->fpu.addr;
	struct fpu_context __user *fpu_ctx = (struct fpu_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs	= (uint64_t *)&fpu_ctx->regs;
	uint64_t __user *fcc	= &fpu_ctx->fcc;
	uint32_t __user *fcsr	= &fpu_ctx->fcsr;

	while (1) {
		lock_fpu_owner();
		if (is_fpu_owner())
			err = save_hw_fpu_context(fpu_ctx);
		else
			err = copy_fpu_to_sigcontext(fpu_ctx);
		unlock_fpu_owner();

		err |= __put_user(FPU_CTX_MAGIC, &info->magic);
		err |= __put_user(extctx->fpu.size, &info->size);

		if (likely(!err))
			break;
		/* Touch the FPU context and try again */
		err = __put_user(0, &regs[0]) |
			__put_user(0, &regs[31]) |
			__put_user(0, fcc) |
			__put_user(0, fcsr);
		if (err)
			return err;	/* really bad sigcontext */
	}

	return err;
}
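
/*
 * All protected_{save,restore}_*() helpers here follow the same pattern:
 * attempt the copy with page faults disabled; if a user access fails,
 * "touch" the first and last words of the context with ordinary (faulting)
 * accesses to page it in, then retry. Only when the touch itself fails is
 * the sigcontext declared hopeless.
 */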

static int protected_restore_fpu_context(struct extctx_layout *extctx)
{
	int err = 0, sig = 0, tmp __maybe_unused;
	struct sctx_info __user *info = extctx->fpu.addr;
	struct fpu_context __user *fpu_ctx = (struct fpu_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs	= (uint64_t *)&fpu_ctx->regs;
	uint64_t __user *fcc	= &fpu_ctx->fcc;
	uint32_t __user *fcsr	= &fpu_ctx->fcsr;

	err = sig = fcsr_pending(fcsr);
	if (err < 0)
		return err;

	while (1) {
		lock_fpu_owner();
		if (is_fpu_owner())
			err = restore_hw_fpu_context(fpu_ctx);
		else
			err = copy_fpu_from_sigcontext(fpu_ctx);
		unlock_fpu_owner();

		if (likely(!err))
			break;
		/* Touch the FPU context and try again */
		err = __get_user(tmp, &regs[0]) |
			__get_user(tmp, &regs[31]) |
			__get_user(tmp, fcc) |
			__get_user(tmp, fcsr);
		if (err)
			break;	/* really bad sigcontext */
	}

	return err ?: sig;
}

static int protected_save_lsx_context(struct extctx_layout *extctx)
{
	int err = 0;
	struct sctx_info __user *info = extctx->lsx.addr;
	struct lsx_context __user *lsx_ctx = (struct lsx_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs	= (uint64_t *)&lsx_ctx->regs;
	uint64_t __user *fcc	= &lsx_ctx->fcc;
	uint32_t __user *fcsr	= &lsx_ctx->fcsr;

	while (1) {
		lock_fpu_owner();
		if (is_lsx_enabled())
			err = save_hw_lsx_context(lsx_ctx);
		else {
			if (is_fpu_owner())
				save_fp(current);
			err = copy_lsx_to_sigcontext(lsx_ctx);
		}
		unlock_fpu_owner();

		err |= __put_user(LSX_CTX_MAGIC, &info->magic);
		err |= __put_user(extctx->lsx.size, &info->size);

		if (likely(!err))
			break;
		/* Touch the LSX context and try again */
		err = __put_user(0, &regs[0]) |
			__put_user(0, &regs[32*2-1]) |
			__put_user(0, fcc) |
			__put_user(0, fcsr);
		if (err)
			return err;	/* really bad sigcontext */
	}

	return err;
}

static int protected_restore_lsx_context(struct extctx_layout *extctx)
{
	int err = 0, sig = 0, tmp __maybe_unused;
	struct sctx_info __user *info = extctx->lsx.addr;
	struct lsx_context __user *lsx_ctx = (struct lsx_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs	= (uint64_t *)&lsx_ctx->regs;
	uint64_t __user *fcc	= &lsx_ctx->fcc;
	uint32_t __user *fcsr	= &lsx_ctx->fcsr;

	err = sig = fcsr_pending(fcsr);
	if (err < 0)
		return err;

	while (1) {
		lock_fpu_owner();
		if (is_lsx_enabled())
			err = restore_hw_lsx_context(lsx_ctx);
		else {
			err = copy_lsx_from_sigcontext(lsx_ctx);
			if (is_fpu_owner())
				restore_fp(current);
		}
		unlock_fpu_owner();

		if (likely(!err))
			break;
		/* Touch the LSX context and try again */
		err = __get_user(tmp, &regs[0]) |
			__get_user(tmp, &regs[32*2-1]) |
			__get_user(tmp, fcc) |
			__get_user(tmp, fcsr);
		if (err)
			break;	/* really bad sigcontext */
	}

	return err ?: sig;
}

static int protected_save_lasx_context(struct extctx_layout *extctx)
{
	int err = 0;
	struct sctx_info __user *info = extctx->lasx.addr;
	struct lasx_context __user *lasx_ctx =
		(struct lasx_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs	= (uint64_t *)&lasx_ctx->regs;
	uint64_t __user *fcc	= &lasx_ctx->fcc;
	uint32_t __user *fcsr	= &lasx_ctx->fcsr;

	while (1) {
		lock_fpu_owner();
		if (is_lasx_enabled())
			err = save_hw_lasx_context(lasx_ctx);
		else {
			if (is_lsx_enabled())
				save_lsx(current);
			else if (is_fpu_owner())
				save_fp(current);
			err = copy_lasx_to_sigcontext(lasx_ctx);
		}
		unlock_fpu_owner();

		err |= __put_user(LASX_CTX_MAGIC, &info->magic);
		err |= __put_user(extctx->lasx.size, &info->size);

		if (likely(!err))
			break;
		/* Touch the LASX context and try again */
		err = __put_user(0, &regs[0]) |
			__put_user(0, &regs[32*4-1]) |
			__put_user(0, fcc) |
			__put_user(0, fcsr);
		if (err)
			return err;	/* really bad sigcontext */
	}

	return err;
}

static int protected_restore_lasx_context(struct extctx_layout *extctx)
{
	int err = 0, sig = 0, tmp __maybe_unused;
	struct sctx_info __user *info = extctx->lasx.addr;
	struct lasx_context __user *lasx_ctx =
		(struct lasx_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs	= (uint64_t *)&lasx_ctx->regs;
	uint64_t __user *fcc	= &lasx_ctx->fcc;
	uint32_t __user *fcsr	= &lasx_ctx->fcsr;

	err = sig = fcsr_pending(fcsr);
	if (err < 0)
		return err;

	while (1) {
		lock_fpu_owner();
		if (is_lasx_enabled())
			err = restore_hw_lasx_context(lasx_ctx);
		else {
			err = copy_lasx_from_sigcontext(lasx_ctx);
			if (is_lsx_enabled())
				restore_lsx(current);
			else if (is_fpu_owner())
				restore_fp(current);
		}
		unlock_fpu_owner();

		if (likely(!err))
			break;
		/* Touch the LASX context and try again */
		err = __get_user(tmp, &regs[0]) |
			__get_user(tmp, &regs[32*4-1]) |
			__get_user(tmp, fcc) |
			__get_user(tmp, fcsr);
		if (err)
			break;	/* really bad sigcontext */
	}

	return err ?: sig;
}

#ifdef CONFIG_CPU_HAS_LBT
static int protected_save_lbt_context(struct extctx_layout *extctx)
{
	int err = 0;
	struct sctx_info __user *info = extctx->lbt.addr;
	struct lbt_context __user *lbt_ctx =
		(struct lbt_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs	= (uint64_t *)&lbt_ctx->regs;
	uint32_t __user *eflags	= (uint32_t *)&lbt_ctx->eflags;

	while (1) {
		lock_lbt_owner();
		if (is_lbt_owner())
			err |= save_hw_lbt_context(lbt_ctx);
		else
			err |= copy_lbt_to_sigcontext(lbt_ctx);
		if (is_fpu_owner())
			err |= save_hw_ftop_context(lbt_ctx);
		else
			err |= copy_ftop_to_sigcontext(lbt_ctx);
		unlock_lbt_owner();

		err |= __put_user(LBT_CTX_MAGIC, &info->magic);
		err |= __put_user(extctx->lbt.size, &info->size);

		if (likely(!err))
			break;
		/* Touch the LBT context and try again */
		err = __put_user(0, &regs[0]) | __put_user(0, eflags);

		if (err)
			return err;
	}

	return err;
}

static int protected_restore_lbt_context(struct extctx_layout *extctx)
{
	int err = 0, tmp __maybe_unused;
	struct sctx_info __user *info = extctx->lbt.addr;
	struct lbt_context __user *lbt_ctx =
		(struct lbt_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs	= (uint64_t *)&lbt_ctx->regs;
	uint32_t __user *eflags	= (uint32_t *)&lbt_ctx->eflags;

	while (1) {
		lock_lbt_owner();
		if (is_lbt_owner())
			err |= restore_hw_lbt_context(lbt_ctx);
		else
			err |= copy_lbt_from_sigcontext(lbt_ctx);
		if (is_fpu_owner())
			err |= restore_hw_ftop_context(lbt_ctx);
		else
			err |= copy_ftop_from_sigcontext(lbt_ctx);
		unlock_lbt_owner();

		if (likely(!err))
			break;
		/* Touch the LBT context and try again */
		err = __get_user(tmp, &regs[0]) | __get_user(tmp, eflags);

		if (err)
			return err;
	}

	return err;
}
#endif

static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
			    struct extctx_layout *extctx)
{
	int i, err = 0;
	struct sctx_info __user *info;

	err |= __put_user(regs->csr_era, &sc->sc_pc);
	err |= __put_user(extctx->flags, &sc->sc_flags);

	err |= __put_user(0, &sc->sc_regs[0]);
	for (i = 1; i < 32; i++)
		err |= __put_user(regs->regs[i], &sc->sc_regs[i]);

	if (extctx->lasx.addr)
		err |= protected_save_lasx_context(extctx);
	else if (extctx->lsx.addr)
		err |= protected_save_lsx_context(extctx);
	else if (extctx->fpu.addr)
		err |= protected_save_fpu_context(extctx);

#ifdef CONFIG_CPU_HAS_LBT
	if (extctx->lbt.addr)
		err |= protected_save_lbt_context(extctx);
#endif

	/* Set the "end" magic */
	info = (struct sctx_info *)extctx->end.addr;
	err |= __put_user(0, &info->magic);
	err |= __put_user(0, &info->size);

	return err;
}

static int parse_extcontext(struct sigcontext __user *sc, struct extctx_layout *extctx)
{
	int err = 0;
	unsigned int magic, size;
	struct sctx_info __user *info = (struct sctx_info __user *)&sc->sc_extcontext;

	while (1) {
		err |= __get_user(magic, &info->magic);
		err |= __get_user(size, &info->size);
		if (err)
			return err;

		switch (magic) {
		case 0: /* END */
			goto done;

		case FPU_CTX_MAGIC:
			if (size < (sizeof(struct sctx_info) +
				    sizeof(struct fpu_context)))
				goto invalid;
			extctx->fpu.addr = info;
			break;

		case LSX_CTX_MAGIC:
			if (size < (sizeof(struct sctx_info) +
				    sizeof(struct lsx_context)))
				goto invalid;
			extctx->lsx.addr = info;
			break;

		case LASX_CTX_MAGIC:
			if (size < (sizeof(struct sctx_info) +
				    sizeof(struct lasx_context)))
				goto invalid;
			extctx->lasx.addr = info;
			break;

		case LBT_CTX_MAGIC:
			if (size < (sizeof(struct sctx_info) +
				    sizeof(struct lbt_context)))
				goto invalid;
			extctx->lbt.addr = info;
			break;

		default:
			goto invalid;
		}

		info = (struct sctx_info *)((char *)info + size);
	}

done:
	return 0;

invalid:
	return -EINVAL;
}

static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	int i, err = 0;
	struct extctx_layout extctx;

	memset(&extctx, 0, sizeof(struct extctx_layout));

	err = __get_user(extctx.flags, &sc->sc_flags);
	if (err)
		goto bad;

	err = parse_extcontext(sc, &extctx);
	if (err)
		goto bad;

	conditional_used_math(extctx.flags & SC_USED_FP);

	/*
	 * The signal handler may have used the FPU; give it up if the
	 * program doesn't want it following sigreturn.
	 */
	if (!(extctx.flags & SC_USED_FP))
		lose_fpu(0);

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	err |= __get_user(regs->csr_era, &sc->sc_pc);
	for (i = 1; i < 32; i++)
		err |= __get_user(regs->regs[i], &sc->sc_regs[i]);

	if (extctx.lasx.addr)
		err |= protected_restore_lasx_context(&extctx);
	else if (extctx.lsx.addr)
		err |= protected_restore_lsx_context(&extctx);
	else if (extctx.fpu.addr)
		err |= protected_restore_fpu_context(&extctx);

#ifdef CONFIG_CPU_HAS_LBT
	if (extctx.lbt.addr)
		err |= protected_restore_lbt_context(&extctx);
#endif

bad:
	return err;
}

static unsigned int handle_flags(void)
{
	unsigned int flags = 0;

	flags = used_math() ? SC_USED_FP : 0;

	switch (current->thread.error_code) {
	case 1:
		flags |= SC_ADDRERR_RD;
		break;
	case 2:
		flags |= SC_ADDRERR_WR;
		break;
	}

	return flags;
}
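
/*
 * The error_code convention used above (1 = faulting read, 2 = faulting
 * write) is recorded by the fault paths outside this file; SC_ADDRERR_RD
 * and SC_ADDRERR_WR let the signal handler learn which kind of access
 * faulted.
 */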

static unsigned long extframe_alloc(struct extctx_layout *extctx,
				    struct _ctx_layout *layout,
				    size_t size, unsigned int align, unsigned long base)
{
	unsigned long new_base = base - size;

	new_base = round_down(new_base, (align < 16 ? 16 : align));
	new_base -= sizeof(struct sctx_info);

	layout->addr = (void *)new_base;
	layout->size = (unsigned int)(base - new_base);
	extctx->size += layout->size;

	return new_base;
}
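
/*
 * Illustration with made-up numbers: base == 0x1000, size == 0x210,
 * align == 32. new_base first becomes 0xdf0, rounds down to 0xde0, then
 * drops sizeof(struct sctx_info) for the record header. layout->size is
 * the full distance from the header back up to the old base, so any
 * alignment padding is included and parse_extcontext() can step over the
 * record using the recorded size alone.
 */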

static unsigned long setup_extcontext(struct extctx_layout *extctx, unsigned long sp)
{
	unsigned long new_sp = sp;

	memset(extctx, 0, sizeof(struct extctx_layout));

	extctx->flags = handle_flags();

	/* Grow down, alloc "end" context info first. */
	new_sp -= sizeof(struct sctx_info);
	extctx->end.addr = (void *)new_sp;
	extctx->end.size = (unsigned int)sizeof(struct sctx_info);
	extctx->size += extctx->end.size;

	if (extctx->flags & SC_USED_FP) {
		if (cpu_has_lasx && thread_lasx_context_live())
			new_sp = extframe_alloc(extctx, &extctx->lasx,
			  sizeof(struct lasx_context), LASX_CTX_ALIGN, new_sp);
		else if (cpu_has_lsx && thread_lsx_context_live())
			new_sp = extframe_alloc(extctx, &extctx->lsx,
			  sizeof(struct lsx_context), LSX_CTX_ALIGN, new_sp);
		else if (cpu_has_fpu)
			new_sp = extframe_alloc(extctx, &extctx->fpu,
			  sizeof(struct fpu_context), FPU_CTX_ALIGN, new_sp);
	}

#ifdef CONFIG_CPU_HAS_LBT
	if (cpu_has_lbt && thread_lbt_context_live()) {
		new_sp = extframe_alloc(extctx, &extctx->lbt,
			  sizeof(struct lbt_context), LBT_CTX_ALIGN, new_sp);
	}
#endif

	return new_sp;
}

static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
				 struct extctx_layout *extctx)
{
	unsigned long sp;

	/* Default to using normal stack */
	sp = regs->regs[3];

	/*
	 * If we are on the alternate signal stack and would overflow it, don't.
	 * Return an always-bogus address instead so we will die with SIGSEGV.
	 */
	if (on_sig_stack(sp) &&
	    !likely(on_sig_stack(sp - sizeof(struct rt_sigframe))))
		return (void __user __force *)(-1UL);

	sp = sigsp(sp, ksig);
	sp = round_down(sp, 16);
	sp = setup_extcontext(extctx, sp);
	sp -= sizeof(struct rt_sigframe);

	if (!IS_ALIGNED(sp, 16))
		BUG();

	return (void __user *)sp;
}
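
/*
 * Net effect: sigsp() picks the normal or alternate stack, the extended
 * context records are carved out top-down by setup_extcontext(), and the
 * rt_sigframe goes below them, so the handler runs with sp pointing at
 * the frame and the extcontext chain directly above it. The BUG() guards
 * the assumption that every step above subtracted a multiple of 16 from
 * a 16-byte-aligned starting point.
 */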

/*
 * Restore the signal mask and context saved by setup_rt_frame(), then
 * resume execution of the interrupted code.
 */

SYSCALL_DEFINE0(rt_sigreturn)
{
	int sig;
	sigset_t set;
	struct pt_regs *regs;
	struct rt_sigframe __user *frame;

	regs = current_pt_regs();
	frame = (struct rt_sigframe __user *)regs->regs[3];
	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->rs_uctx.uc_sigmask, sizeof(set)))
		goto badframe;

	set_current_blocked(&set);

	sig = restore_sigcontext(regs, &frame->rs_uctx.uc_mcontext);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig);

	regs->regs[0] = 0; /* No syscall restarting */
	if (restore_altstack(&frame->rs_uctx.uc_stack))
		goto badframe;

	return regs->regs[4];

badframe:
	force_sig(SIGSEGV);
	return 0;
}
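
/*
 * Note the return path: returning regs->regs[4] hands the restored a0
 * back as the "syscall return value", so the normal syscall-exit code
 * rewrites the interrupted context's a0 with itself instead of
 * clobbering it, and zeroing regs->regs[0] keeps the restart logic from
 * touching the frame again.
 */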

static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
			  struct pt_regs *regs, sigset_t *set)
{
	int err = 0;
	struct extctx_layout extctx;
	struct rt_sigframe __user *frame;

	frame = get_sigframe(ksig, regs, &extctx);
	if (!access_ok(frame, sizeof(*frame) + extctx.size))
		return -EFAULT;

	/* Create siginfo.  */
	err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info);

	/* Create the ucontext.	 */
	err |= __put_user(0, &frame->rs_uctx.uc_flags);
	err |= __put_user(NULL, &frame->rs_uctx.uc_link);
	err |= __save_altstack(&frame->rs_uctx.uc_stack, regs->regs[3]);
	err |= setup_sigcontext(regs, &frame->rs_uctx.uc_mcontext, &extctx);
	err |= __copy_to_user(&frame->rs_uctx.uc_sigmask, set, sizeof(*set));

	if (err)
		return -EFAULT;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = pointer to siginfo
	 *   a2 = pointer to ucontext
	 *
	 * csr_era points to the signal handler, $r3 (sp) points to
	 * the struct rt_sigframe.
	 */
	regs->regs[4] = ksig->sig;
	regs->regs[5] = (unsigned long) &frame->rs_info;
	regs->regs[6] = (unsigned long) &frame->rs_uctx;
	regs->regs[3] = (unsigned long) frame;
	regs->regs[1] = (unsigned long) sig_return;
	regs->csr_era = (unsigned long) ksig->ka.sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->csr_era, regs->regs[1]);

	return 0;
}

static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	int ret;
	sigset_t *oldset = sigmask_to_save();
	void *vdso = current->mm->context.vdso;

	/* Are we from a system call? */
	if (regs->regs[0]) {
		switch (regs->regs[4]) {
		case -ERESTART_RESTARTBLOCK:
		case -ERESTARTNOHAND:
			regs->regs[4] = -EINTR;
			break;
		case -ERESTARTSYS:
			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
				regs->regs[4] = -EINTR;
				break;
			}
			fallthrough;
		case -ERESTARTNOINTR:
			regs->regs[4] = regs->orig_a0;
			regs->csr_era -= 4;
		}

		regs->regs[0] = 0;	/* Don't deal with this again.	*/
	}

	rseq_signal_deliver(ksig, regs);

	ret = setup_rt_frame(vdso + current->thread.vdso->offset_sigreturn, ksig, regs, oldset);

	signal_setup_done(ret, ksig, 0);
}
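
/*
 * On syscall restart (here and below): regs->regs[0] is the saved slot
 * of the hardwired-zero register, which the syscall entry path reuses
 * as an "inside a syscall" marker. LoongArch instructions are a fixed
 * 4 bytes, so csr_era -= 4 backs the PC up to re-execute the syscall
 * instruction, with a0 refreshed from orig_a0.
 */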

void arch_do_signal_or_restart(struct pt_regs *regs)
{
	struct ksignal ksig;

	if (get_signal(&ksig)) {
		/* Whee!  Actually deliver the signal.	*/
		handle_signal(&ksig, regs);
		return;
	}

	/* Are we from a system call? */
	if (regs->regs[0]) {
		switch (regs->regs[4]) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			regs->regs[4] = regs->orig_a0;
			regs->csr_era -= 4;
			break;

		case -ERESTART_RESTARTBLOCK:
			regs->regs[4] = regs->orig_a0;
			regs->regs[11] = __NR_restart_syscall;
			regs->csr_era -= 4;
			break;
		}
		regs->regs[0] = 0;	/* Don't deal with this again.	*/
	}

	/*
	 * If there's no signal to deliver, we just put the saved sigmask
	 * back.
	 */
	restore_saved_sigmask();
}