// SPDX-License-Identifier: GPL-2.0+
/*
 * Author: Hanlu Li <lihanlu@loongson.cn>
 *         Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#include <linux/audit.h>
#include <linux/cache.h>
#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/irqflags.h>
#include <linux/rseq.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/compiler.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>

#include <asm/asm.h>
#include <asm/cacheflush.h>
#include <asm/cpu-features.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>

#ifdef DEBUG_SIG
# define DEBUGP(fmt, args...) printk("%s: " fmt, __func__, ##args)
#else
# define DEBUGP(fmt, args...)
#endif

/* Make sure we will not lose FPU ownership */
#define lock_fpu_owner()	({ preempt_disable(); pagefault_disable(); })
#define unlock_fpu_owner()	({ pagefault_enable(); preempt_enable(); })
/* Make sure we will not lose LBT ownership */
#define lock_lbt_owner()	({ preempt_disable(); pagefault_disable(); })
#define unlock_lbt_owner()	({ pagefault_enable(); preempt_enable(); })
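
/*
 * Note: a context switch between checking is_fpu_owner()/is_lbt_owner()
 * and copying the live registers could move the hardware state from under
 * us, so both preemption and page faults are disabled across the copy.
 * User-access faults are instead detected and retried by the
 * protected_*_context() helpers further down.
 */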

struct rt_sigframe {
	struct siginfo rs_info;
	struct ucontext rs_uctx;
};

struct _ctx_layout {
	struct sctx_info *addr;
	unsigned int size;
};

struct extctx_layout {
	unsigned long size;
	unsigned int flags;
	struct _ctx_layout fpu;
	struct _ctx_layout lsx;
	struct _ctx_layout lasx;
	struct _ctx_layout lbt;
	struct _ctx_layout end;
};

static void __user *get_ctx_through_ctxinfo(struct sctx_info *info)
{
	return (void __user *)((char *)info + sizeof(struct sctx_info));
}

/*
 * Thread saved context copy to/from a signal context presumed to be on the
 * user stack, and therefore accessed with appropriate macros from uaccess.h.
 */
static int copy_fpu_to_sigcontext(struct fpu_context __user *ctx)
{
	int i;
	int err = 0;
	uint64_t __user *regs = (uint64_t *)&ctx->regs;
	uint64_t __user *fcc = &ctx->fcc;
	uint32_t __user *fcsr = &ctx->fcsr;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
				  &regs[i]);
	}
	err |= __put_user(current->thread.fpu.fcc, fcc);
	err |= __put_user(current->thread.fpu.fcsr, fcsr);

	return err;
}

static int copy_fpu_from_sigcontext(struct fpu_context __user *ctx)
{
	int i;
	int err = 0;
	u64 fpr_val;
	uint64_t __user *regs = (uint64_t *)&ctx->regs;
	uint64_t __user *fcc = &ctx->fcc;
	uint32_t __user *fcsr = &ctx->fcsr;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |= __get_user(fpr_val, &regs[i]);
		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
	}
	err |= __get_user(current->thread.fpu.fcc, fcc);
	err |= __get_user(current->thread.fpu.fcsr, fcsr);

	return err;
}

static int copy_lsx_to_sigcontext(struct lsx_context __user *ctx)
{
	int i;
	int err = 0;
	uint64_t __user *regs = (uint64_t *)&ctx->regs;
	uint64_t __user *fcc = &ctx->fcc;
	uint32_t __user *fcsr = &ctx->fcsr;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
				  &regs[2*i]);
		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
				  &regs[2*i+1]);
	}
	err |= __put_user(current->thread.fpu.fcc, fcc);
	err |= __put_user(current->thread.fpu.fcsr, fcsr);

	return err;
}

static int copy_lsx_from_sigcontext(struct lsx_context __user *ctx)
{
	int i;
	int err = 0;
	u64 fpr_val;
	uint64_t __user *regs = (uint64_t *)&ctx->regs;
	uint64_t __user *fcc = &ctx->fcc;
	uint32_t __user *fcsr = &ctx->fcsr;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |= __get_user(fpr_val, &regs[2*i]);
		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
		err |= __get_user(fpr_val, &regs[2*i+1]);
		set_fpr64(&current->thread.fpu.fpr[i], 1, fpr_val);
	}
	err |= __get_user(current->thread.fpu.fcc, fcc);
	err |= __get_user(current->thread.fpu.fcsr, fcsr);

	return err;
}

static int copy_lasx_to_sigcontext(struct lasx_context __user *ctx)
{
	int i;
	int err = 0;
	uint64_t __user *regs = (uint64_t *)&ctx->regs;
	uint64_t __user *fcc = &ctx->fcc;
	uint32_t __user *fcsr = &ctx->fcsr;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
				  &regs[4*i]);
		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
				  &regs[4*i+1]);
		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 2),
				  &regs[4*i+2]);
		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 3),
				  &regs[4*i+3]);
	}
	err |= __put_user(current->thread.fpu.fcc, fcc);
	err |= __put_user(current->thread.fpu.fcsr, fcsr);

	return err;
}

static int copy_lasx_from_sigcontext(struct lasx_context __user *ctx)
{
	int i;
	int err = 0;
	u64 fpr_val;
	uint64_t __user *regs = (uint64_t *)&ctx->regs;
	uint64_t __user *fcc = &ctx->fcc;
	uint32_t __user *fcsr = &ctx->fcsr;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |= __get_user(fpr_val, &regs[4*i]);
		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
		err |= __get_user(fpr_val, &regs[4*i+1]);
		set_fpr64(&current->thread.fpu.fpr[i], 1, fpr_val);
		err |= __get_user(fpr_val, &regs[4*i+2]);
		set_fpr64(&current->thread.fpu.fpr[i], 2, fpr_val);
		err |= __get_user(fpr_val, &regs[4*i+3]);
		set_fpr64(&current->thread.fpu.fpr[i], 3, fpr_val);
	}
	err |= __get_user(current->thread.fpu.fcc, fcc);
	err |= __get_user(current->thread.fpu.fcsr, fcsr);

	return err;
}

#ifdef CONFIG_CPU_HAS_LBT
static int copy_lbt_to_sigcontext(struct lbt_context __user *ctx)
{
	int err = 0;
	uint64_t __user *regs = (uint64_t *)&ctx->regs;
	uint32_t __user *eflags = (uint32_t *)&ctx->eflags;

	err |= __put_user(current->thread.lbt.scr0, &regs[0]);
	err |= __put_user(current->thread.lbt.scr1, &regs[1]);
	err |= __put_user(current->thread.lbt.scr2, &regs[2]);
	err |= __put_user(current->thread.lbt.scr3, &regs[3]);
	err |= __put_user(current->thread.lbt.eflags, eflags);

	return err;
}

static int copy_lbt_from_sigcontext(struct lbt_context __user *ctx)
{
	int err = 0;
	uint64_t __user *regs = (uint64_t *)&ctx->regs;
	uint32_t __user *eflags = (uint32_t *)&ctx->eflags;

	err |= __get_user(current->thread.lbt.scr0, &regs[0]);
	err |= __get_user(current->thread.lbt.scr1, &regs[1]);
	err |= __get_user(current->thread.lbt.scr2, &regs[2]);
	err |= __get_user(current->thread.lbt.scr3, &regs[3]);
	err |= __get_user(current->thread.lbt.eflags, eflags);

	return err;
}

static int copy_ftop_to_sigcontext(struct lbt_context __user *ctx)
{
	uint32_t __user *ftop = &ctx->ftop;

	return __put_user(current->thread.fpu.ftop, ftop);
}

static int copy_ftop_from_sigcontext(struct lbt_context __user *ctx)
{
	uint32_t __user *ftop = &ctx->ftop;

	return __get_user(current->thread.fpu.ftop, ftop);
}
#endif

/*
 * Wrappers for the assembly _{save,restore}_fp_context functions.
 */
static int save_hw_fpu_context(struct fpu_context __user *ctx)
{
	uint64_t __user *regs = (uint64_t *)&ctx->regs;
	uint64_t __user *fcc = &ctx->fcc;
	uint32_t __user *fcsr = &ctx->fcsr;

	return _save_fp_context(regs, fcc, fcsr);
}

static int restore_hw_fpu_context(struct fpu_context __user *ctx)
{
	uint64_t __user *regs = (uint64_t *)&ctx->regs;
	uint64_t __user *fcc = &ctx->fcc;
	uint32_t __user *fcsr = &ctx->fcsr;

	return _restore_fp_context(regs, fcc, fcsr);
}

static int save_hw_lsx_context(struct lsx_context __user *ctx)
{
	uint64_t __user *regs = (uint64_t *)&ctx->regs;
	uint64_t __user *fcc = &ctx->fcc;
	uint32_t __user *fcsr = &ctx->fcsr;

	return _save_lsx_context(regs, fcc, fcsr);
}

static int restore_hw_lsx_context(struct lsx_context __user *ctx)
{
	uint64_t __user *regs = (uint64_t *)&ctx->regs;
	uint64_t __user *fcc = &ctx->fcc;
	uint32_t __user *fcsr = &ctx->fcsr;

	return _restore_lsx_context(regs, fcc, fcsr);
}

static int save_hw_lasx_context(struct lasx_context __user *ctx)
{
	uint64_t __user *regs = (uint64_t *)&ctx->regs;
	uint64_t __user *fcc = &ctx->fcc;
	uint32_t __user *fcsr = &ctx->fcsr;

	return _save_lasx_context(regs, fcc, fcsr);
}

static int restore_hw_lasx_context(struct lasx_context __user *ctx)
{
	uint64_t __user *regs = (uint64_t *)&ctx->regs;
	uint64_t __user *fcc = &ctx->fcc;
	uint32_t __user *fcsr = &ctx->fcsr;

	return _restore_lasx_context(regs, fcc, fcsr);
}

/*
 * Wrappers for the assembly _{save,restore}_lbt_context functions.
 */
#ifdef CONFIG_CPU_HAS_LBT
static int save_hw_lbt_context(struct lbt_context __user *ctx)
{
	uint64_t __user *regs = (uint64_t *)&ctx->regs;
	uint32_t __user *eflags = (uint32_t *)&ctx->eflags;

	return _save_lbt_context(regs, eflags);
}

static int restore_hw_lbt_context(struct lbt_context __user *ctx)
{
	uint64_t __user *regs = (uint64_t *)&ctx->regs;
	uint32_t __user *eflags = (uint32_t *)&ctx->eflags;

	return _restore_lbt_context(regs, eflags);
}

static int save_hw_ftop_context(struct lbt_context __user *ctx)
{
	uint32_t __user *ftop = &ctx->ftop;

	return _save_ftop_context(ftop);
}

static int restore_hw_ftop_context(struct lbt_context __user *ctx)
{
	uint32_t __user *ftop = &ctx->ftop;

	return _restore_ftop_context(ftop);
}
#endif

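/*
 * In the FCSR the exception enable bits occupy bits [4:0] and the
 * corresponding cause bits occupy bits [28:24]; shifting the enables
 * left by 24 lines them up with the causes, so a single AND below
 * detects any enabled exception raised while the handler ran.
 */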
static int fcsr_pending(unsigned int __user *fcsr)
{
	int err, sig = 0;
	unsigned int csr, enabled;

	err = __get_user(csr, fcsr);
	enabled = ((csr & FPU_CSR_ALL_E) << 24);
	/*
	 * If the signal handler set some FPU exceptions, clear them and
	 * send SIGFPE.
	 */
	if (csr & enabled) {
		csr &= ~enabled;
		err |= __put_user(csr, fcsr);
		sig = SIGFPE;
	}
	return err ?: sig;
}

/*
 * Helper routines
 */
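/*
 * Because the copies below run with page faults disabled, a sigframe
 * page that is not yet faulted in makes __put_user()/__get_user() fail.
 * On failure each helper drops the lock, touches the first/last words
 * of the context (with faults enabled again) to bring the page in, and
 * retries; only a fault that survives the touch is treated as a really
 * bad sigcontext.
 */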
static int protected_save_fpu_context(struct extctx_layout *extctx)
{
	int err = 0;
	struct sctx_info __user *info = extctx->fpu.addr;
	struct fpu_context __user *fpu_ctx = (struct fpu_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs = (uint64_t *)&fpu_ctx->regs;
	uint64_t __user *fcc = &fpu_ctx->fcc;
	uint32_t __user *fcsr = &fpu_ctx->fcsr;

	while (1) {
		lock_fpu_owner();
		if (is_fpu_owner())
			err = save_hw_fpu_context(fpu_ctx);
		else
			err = copy_fpu_to_sigcontext(fpu_ctx);
		unlock_fpu_owner();

		err |= __put_user(FPU_CTX_MAGIC, &info->magic);
		err |= __put_user(extctx->fpu.size, &info->size);

		if (likely(!err))
			break;
		/* Touch the FPU context and try again */
		err = __put_user(0, &regs[0]) |
			__put_user(0, &regs[31]) |
			__put_user(0, fcc) |
			__put_user(0, fcsr);
		if (err)
			return err;	/* really bad sigcontext */
	}

	return err;
}

static int protected_restore_fpu_context(struct extctx_layout *extctx)
{
	int err = 0, sig = 0, tmp __maybe_unused;
	struct sctx_info __user *info = extctx->fpu.addr;
	struct fpu_context __user *fpu_ctx = (struct fpu_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs = (uint64_t *)&fpu_ctx->regs;
	uint64_t __user *fcc = &fpu_ctx->fcc;
	uint32_t __user *fcsr = &fpu_ctx->fcsr;

	err = sig = fcsr_pending(fcsr);
	if (err < 0)
		return err;

	while (1) {
		lock_fpu_owner();
		if (is_fpu_owner())
			err = restore_hw_fpu_context(fpu_ctx);
		else
			err = copy_fpu_from_sigcontext(fpu_ctx);
		unlock_fpu_owner();

		if (likely(!err))
			break;
		/* Touch the FPU context and try again */
		err = __get_user(tmp, &regs[0]) |
			__get_user(tmp, &regs[31]) |
			__get_user(tmp, fcc) |
			__get_user(tmp, fcsr);
		if (err)
			break;	/* really bad sigcontext */
	}

	return err ?: sig;
}

static int protected_save_lsx_context(struct extctx_layout *extctx)
{
	int err = 0;
	struct sctx_info __user *info = extctx->lsx.addr;
	struct lsx_context __user *lsx_ctx = (struct lsx_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs = (uint64_t *)&lsx_ctx->regs;
	uint64_t __user *fcc = &lsx_ctx->fcc;
	uint32_t __user *fcsr = &lsx_ctx->fcsr;

	while (1) {
		lock_fpu_owner();
		if (is_lsx_enabled())
			err = save_hw_lsx_context(lsx_ctx);
		else {
			if (is_fpu_owner())
				save_fp(current);
			err = copy_lsx_to_sigcontext(lsx_ctx);
		}
		unlock_fpu_owner();

		err |= __put_user(LSX_CTX_MAGIC, &info->magic);
		err |= __put_user(extctx->lsx.size, &info->size);

		if (likely(!err))
			break;
		/* Touch the LSX context and try again */
		err = __put_user(0, &regs[0]) |
			__put_user(0, &regs[32*2-1]) |
			__put_user(0, fcc) |
			__put_user(0, fcsr);
		if (err)
			return err;	/* really bad sigcontext */
	}

	return err;
}

static int protected_restore_lsx_context(struct extctx_layout *extctx)
{
	int err = 0, sig = 0, tmp __maybe_unused;
	struct sctx_info __user *info = extctx->lsx.addr;
	struct lsx_context __user *lsx_ctx = (struct lsx_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs = (uint64_t *)&lsx_ctx->regs;
	uint64_t __user *fcc = &lsx_ctx->fcc;
	uint32_t __user *fcsr = &lsx_ctx->fcsr;

	err = sig = fcsr_pending(fcsr);
	if (err < 0)
		return err;

	while (1) {
		lock_fpu_owner();
		if (is_lsx_enabled())
			err = restore_hw_lsx_context(lsx_ctx);
		else {
			err = copy_lsx_from_sigcontext(lsx_ctx);
			if (is_fpu_owner())
				restore_fp(current);
		}
		unlock_fpu_owner();

		if (likely(!err))
			break;
		/* Touch the LSX context and try again */
		err = __get_user(tmp, &regs[0]) |
			__get_user(tmp, &regs[32*2-1]) |
			__get_user(tmp, fcc) |
			__get_user(tmp, fcsr);
		if (err)
			break;	/* really bad sigcontext */
	}

	return err ?: sig;
}

static int protected_save_lasx_context(struct extctx_layout *extctx)
{
	int err = 0;
	struct sctx_info __user *info = extctx->lasx.addr;
	struct lasx_context __user *lasx_ctx =
		(struct lasx_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs = (uint64_t *)&lasx_ctx->regs;
	uint64_t __user *fcc = &lasx_ctx->fcc;
	uint32_t __user *fcsr = &lasx_ctx->fcsr;

	while (1) {
		lock_fpu_owner();
		if (is_lasx_enabled())
			err = save_hw_lasx_context(lasx_ctx);
		else {
			if (is_lsx_enabled())
				save_lsx(current);
			else if (is_fpu_owner())
				save_fp(current);
			err = copy_lasx_to_sigcontext(lasx_ctx);
		}
		unlock_fpu_owner();

		err |= __put_user(LASX_CTX_MAGIC, &info->magic);
		err |= __put_user(extctx->lasx.size, &info->size);

		if (likely(!err))
			break;
		/* Touch the LASX context and try again */
		err = __put_user(0, &regs[0]) |
			__put_user(0, &regs[32*4-1]) |
			__put_user(0, fcc) |
			__put_user(0, fcsr);
		if (err)
			return err;	/* really bad sigcontext */
	}

	return err;
}

static int protected_restore_lasx_context(struct extctx_layout *extctx)
{
	int err = 0, sig = 0, tmp __maybe_unused;
	struct sctx_info __user *info = extctx->lasx.addr;
	struct lasx_context __user *lasx_ctx =
		(struct lasx_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs = (uint64_t *)&lasx_ctx->regs;
	uint64_t __user *fcc = &lasx_ctx->fcc;
	uint32_t __user *fcsr = &lasx_ctx->fcsr;

	err = sig = fcsr_pending(fcsr);
	if (err < 0)
		return err;

	while (1) {
		lock_fpu_owner();
		if (is_lasx_enabled())
			err = restore_hw_lasx_context(lasx_ctx);
		else {
			err = copy_lasx_from_sigcontext(lasx_ctx);
			if (is_lsx_enabled())
				restore_lsx(current);
			else if (is_fpu_owner())
				restore_fp(current);
		}
		unlock_fpu_owner();

		if (likely(!err))
			break;
		/* Touch the LASX context and try again */
		err = __get_user(tmp, &regs[0]) |
			__get_user(tmp, &regs[32*4-1]) |
			__get_user(tmp, fcc) |
			__get_user(tmp, fcsr);
		if (err)
			break;	/* really bad sigcontext */
	}

	return err ?: sig;
}

#ifdef CONFIG_CPU_HAS_LBT
static int protected_save_lbt_context(struct extctx_layout *extctx)
{
	int err = 0;
	struct sctx_info __user *info = extctx->lbt.addr;
	struct lbt_context __user *lbt_ctx =
		(struct lbt_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs = (uint64_t *)&lbt_ctx->regs;
	uint32_t __user *eflags = (uint32_t *)&lbt_ctx->eflags;

	while (1) {
		lock_lbt_owner();
		if (is_lbt_owner())
			err |= save_hw_lbt_context(lbt_ctx);
		else
			err |= copy_lbt_to_sigcontext(lbt_ctx);
		if (is_fpu_owner())
			err |= save_hw_ftop_context(lbt_ctx);
		else
			err |= copy_ftop_to_sigcontext(lbt_ctx);
		unlock_lbt_owner();

		err |= __put_user(LBT_CTX_MAGIC, &info->magic);
		err |= __put_user(extctx->lbt.size, &info->size);

		if (likely(!err))
			break;
		/* Touch the LBT context and try again */
		err = __put_user(0, &regs[0]) | __put_user(0, eflags);

		if (err)
			return err;
	}

	return err;
}

static int protected_restore_lbt_context(struct extctx_layout *extctx)
{
	int err = 0, tmp __maybe_unused;
	struct sctx_info __user *info = extctx->lbt.addr;
	struct lbt_context __user *lbt_ctx =
		(struct lbt_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs = (uint64_t *)&lbt_ctx->regs;
	uint32_t __user *eflags = (uint32_t *)&lbt_ctx->eflags;

	while (1) {
		lock_lbt_owner();
		if (is_lbt_owner())
			err |= restore_hw_lbt_context(lbt_ctx);
		else
			err |= copy_lbt_from_sigcontext(lbt_ctx);
		if (is_fpu_owner())
			err |= restore_hw_ftop_context(lbt_ctx);
		else
			err |= copy_ftop_from_sigcontext(lbt_ctx);
		unlock_lbt_owner();

		if (likely(!err))
			break;
		/* Touch the LBT context and try again */
		err = __get_user(tmp, &regs[0]) | __get_user(tmp, eflags);

		if (err)
			return err;
	}

	return err;
}
#endif

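/*
 * Only the widest live vector context is written out: a LASX frame
 * already embeds the LSX and scalar FP state in the low lanes of its
 * 256-bit registers, and an LSX frame likewise embeds the scalar FP
 * state, so the narrower contexts would be redundant.
 */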
static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
			    struct extctx_layout *extctx)
{
	int i, err = 0;
	struct sctx_info __user *info;

	err |= __put_user(regs->csr_era, &sc->sc_pc);
	err |= __put_user(extctx->flags, &sc->sc_flags);

	err |= __put_user(0, &sc->sc_regs[0]);
	for (i = 1; i < 32; i++)
		err |= __put_user(regs->regs[i], &sc->sc_regs[i]);

	if (extctx->lasx.addr)
		err |= protected_save_lasx_context(extctx);
	else if (extctx->lsx.addr)
		err |= protected_save_lsx_context(extctx);
	else if (extctx->fpu.addr)
		err |= protected_save_fpu_context(extctx);

#ifdef CONFIG_CPU_HAS_LBT
	if (extctx->lbt.addr)
		err |= protected_save_lbt_context(extctx);
#endif

	/* Set the "end" magic */
	info = (struct sctx_info *)extctx->end.addr;
	err |= __put_user(0, &info->magic);
	err |= __put_user(0, &info->size);

	return err;
}

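/*
 * The extended context is a chain of records on the user stack, each a
 * struct sctx_info header (magic + size) followed by its payload; a
 * record with magic 0 terminates the chain.
 */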
static int parse_extcontext(struct sigcontext __user *sc, struct extctx_layout *extctx)
{
	int err = 0;
	unsigned int magic, size;
	struct sctx_info __user *info = (struct sctx_info __user *)&sc->sc_extcontext;

	while (1) {
		err |= __get_user(magic, &info->magic);
		err |= __get_user(size, &info->size);
		if (err)
			return err;

		switch (magic) {
		case 0: /* END */
			goto done;

		case FPU_CTX_MAGIC:
			if (size < (sizeof(struct sctx_info) +
				    sizeof(struct fpu_context)))
				goto invalid;
			extctx->fpu.addr = info;
			break;

		case LSX_CTX_MAGIC:
			if (size < (sizeof(struct sctx_info) +
				    sizeof(struct lsx_context)))
				goto invalid;
			extctx->lsx.addr = info;
			break;

		case LASX_CTX_MAGIC:
			if (size < (sizeof(struct sctx_info) +
				    sizeof(struct lasx_context)))
				goto invalid;
			extctx->lasx.addr = info;
			break;

		case LBT_CTX_MAGIC:
			if (size < (sizeof(struct sctx_info) +
				    sizeof(struct lbt_context)))
				goto invalid;
			extctx->lbt.addr = info;
			break;

		default:
			goto invalid;
		}

		info = (struct sctx_info *)((char *)info + size);
	}

done:
	return 0;

invalid:
	return -EINVAL;
}

static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	int i, err = 0;
	struct extctx_layout extctx;

	memset(&extctx, 0, sizeof(struct extctx_layout));

	err = __get_user(extctx.flags, &sc->sc_flags);
	if (err)
		goto bad;

	err = parse_extcontext(sc, &extctx);
	if (err)
		goto bad;

	conditional_used_math(extctx.flags & SC_USED_FP);

	/*
	 * The signal handler may have used FPU; give it up if the program
	 * doesn't want it following sigreturn.
	 */
	if (!(extctx.flags & SC_USED_FP))
		lose_fpu(0);

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	err |= __get_user(regs->csr_era, &sc->sc_pc);
	for (i = 1; i < 32; i++)
		err |= __get_user(regs->regs[i], &sc->sc_regs[i]);

	if (extctx.lasx.addr)
		err |= protected_restore_lasx_context(&extctx);
	else if (extctx.lsx.addr)
		err |= protected_restore_lsx_context(&extctx);
	else if (extctx.fpu.addr)
		err |= protected_restore_fpu_context(&extctx);

#ifdef CONFIG_CPU_HAS_LBT
	if (extctx.lbt.addr)
		err |= protected_restore_lbt_context(&extctx);
#endif

bad:
	return err;
}

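/*
 * thread.error_code is expected to be set by the fault handlers; as
 * used here, 1 denotes a faulting read (SC_ADDRERR_RD) and 2 a faulting
 * write (SC_ADDRERR_WR), forwarded to userspace via sc_flags.
 */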
static unsigned int handle_flags(void)
{
	unsigned int flags = 0;

	flags = used_math() ? SC_USED_FP : 0;

	switch (current->thread.error_code) {
	case 1:
		flags |= SC_ADDRERR_RD;
		break;
	case 2:
		flags |= SC_ADDRERR_WR;
		break;
	}

	return flags;
}

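/*
 * Carve one extended-context record out of the signal stack, growing
 * downwards: the payload is aligned to at least 16 bytes, then the
 * struct sctx_info header is prepended. layout->size records the whole
 * allocation, including any alignment padding.
 */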
static unsigned long extframe_alloc(struct extctx_layout *extctx,
				    struct _ctx_layout *layout,
				    size_t size, unsigned int align, unsigned long base)
{
	unsigned long new_base = base - size;

	new_base = round_down(new_base, (align < 16 ? 16 : align));
	new_base -= sizeof(struct sctx_info);

	layout->addr = (void *)new_base;
	layout->size = (unsigned int)(base - new_base);
	extctx->size += layout->size;

	return new_base;
}

static unsigned long setup_extcontext(struct extctx_layout *extctx, unsigned long sp)
{
	unsigned long new_sp = sp;

	memset(extctx, 0, sizeof(struct extctx_layout));

	extctx->flags = handle_flags();

	/* Grow down, alloc "end" context info first. */
	new_sp -= sizeof(struct sctx_info);
	extctx->end.addr = (void *)new_sp;
	extctx->end.size = (unsigned int)sizeof(struct sctx_info);
	extctx->size += extctx->end.size;

	if (extctx->flags & SC_USED_FP) {
		if (cpu_has_lasx && thread_lasx_context_live())
			new_sp = extframe_alloc(extctx, &extctx->lasx,
				sizeof(struct lasx_context), LASX_CTX_ALIGN, new_sp);
		else if (cpu_has_lsx && thread_lsx_context_live())
			new_sp = extframe_alloc(extctx, &extctx->lsx,
				sizeof(struct lsx_context), LSX_CTX_ALIGN, new_sp);
		else if (cpu_has_fpu)
			new_sp = extframe_alloc(extctx, &extctx->fpu,
				sizeof(struct fpu_context), FPU_CTX_ALIGN, new_sp);
	}

#ifdef CONFIG_CPU_HAS_LBT
	if (cpu_has_lbt && thread_lbt_context_live()) {
		new_sp = extframe_alloc(extctx, &extctx->lbt,
			sizeof(struct lbt_context), LBT_CTX_ALIGN, new_sp);
	}
#endif

	return new_sp;
}

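/*
 * Resulting frame layout, from high to low addresses: the extended
 * context records (the terminating "end" record, allocated first, sits
 * at the top), then struct rt_sigframe itself, with the final sp kept
 * 16-byte aligned.
 */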
static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
				 struct extctx_layout *extctx)
{
	unsigned long sp;

	/* Default to using normal stack */
	sp = regs->regs[3];

	/*
	 * If we are on the alternate signal stack and would overflow it, don't.
	 * Return an always-bogus address instead so we will die with SIGSEGV.
	 */
	if (on_sig_stack(sp) &&
	    !likely(on_sig_stack(sp - sizeof(struct rt_sigframe))))
		return (void __user __force *)(-1UL);

	sp = sigsp(sp, ksig);
	sp = round_down(sp, 16);
	sp = setup_extcontext(extctx, sp);
	sp -= sizeof(struct rt_sigframe);

	if (!IS_ALIGNED(sp, 16))
		BUG();

	return (void __user *)sp;
}

/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */

SYSCALL_DEFINE0(rt_sigreturn)
{
	int sig;
	sigset_t set;
	struct pt_regs *regs;
	struct rt_sigframe __user *frame;

	regs = current_pt_regs();
	frame = (struct rt_sigframe __user *)regs->regs[3];
	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->rs_uctx.uc_sigmask, sizeof(set)))
		goto badframe;

	set_current_blocked(&set);

	sig = restore_sigcontext(regs, &frame->rs_uctx.uc_mcontext);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig);

	regs->regs[0] = 0;	/* No syscall restarting */
	if (restore_altstack(&frame->rs_uctx.uc_stack))
		goto badframe;

	return regs->regs[4];

badframe:
	force_sig(SIGSEGV);
	return 0;
}

static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
			  struct pt_regs *regs, sigset_t *set)
{
	int err = 0;
	struct extctx_layout extctx;
	struct rt_sigframe __user *frame;

	frame = get_sigframe(ksig, regs, &extctx);
	if (!access_ok(frame, sizeof(*frame) + extctx.size))
		return -EFAULT;

	/* Create siginfo. */
	err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info);

	/* Create the ucontext. */
	err |= __put_user(0, &frame->rs_uctx.uc_flags);
	err |= __put_user(NULL, &frame->rs_uctx.uc_link);
	err |= __save_altstack(&frame->rs_uctx.uc_stack, regs->regs[3]);
	err |= setup_sigcontext(regs, &frame->rs_uctx.uc_mcontext, &extctx);
	err |= __copy_to_user(&frame->rs_uctx.uc_sigmask, set, sizeof(*set));

	if (err)
		return -EFAULT;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = pointer to siginfo
	 *   a2 = pointer to ucontext
	 *
	 * csr_era points to the signal handler, $r3 (sp) points to
	 * the struct rt_sigframe.
	 */
	regs->regs[4] = ksig->sig;
	regs->regs[5] = (unsigned long) &frame->rs_info;
	regs->regs[6] = (unsigned long) &frame->rs_uctx;
	regs->regs[3] = (unsigned long) frame;
	regs->regs[1] = (unsigned long) sig_return;
	regs->csr_era = (unsigned long) ksig->ka.sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->csr_era, regs->regs[1]);

	return 0;
}

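/*
 * A non-zero regs->regs[0] flags "interrupted while in a syscall" ($r0
 * itself is hard-wired to zero, so the slot is free for this). orig_a0
 * preserves the original first syscall argument, which the error code
 * in a0 has clobbered; rewinding csr_era by 4 re-executes the 4-byte
 * syscall instruction on restart.
 */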
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	int ret;
	sigset_t *oldset = sigmask_to_save();
	void *vdso = current->mm->context.vdso;

	/* Are we from a system call? */
	if (regs->regs[0]) {
		switch (regs->regs[4]) {
		case -ERESTART_RESTARTBLOCK:
		case -ERESTARTNOHAND:
			regs->regs[4] = -EINTR;
			break;
		case -ERESTARTSYS:
			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
				regs->regs[4] = -EINTR;
				break;
			}
			fallthrough;
		case -ERESTARTNOINTR:
			regs->regs[4] = regs->orig_a0;
			regs->csr_era -= 4;
		}

		regs->regs[0] = 0;	/* Don't deal with this again. */
	}

	rseq_signal_deliver(ksig, regs);

	ret = setup_rt_frame(vdso + current->thread.vdso->offset_sigreturn, ksig, regs, oldset);

	signal_setup_done(ret, ksig, 0);
}

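/*
 * When no handler runs, -ERESTART_RESTARTBLOCK is restarted by loading
 * __NR_restart_syscall into $r11 (a7, the syscall-number register)
 * before re-executing the syscall instruction.
 */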
void arch_do_signal_or_restart(struct pt_regs *regs)
{
	struct ksignal ksig;

	if (get_signal(&ksig)) {
		/* Whee! Actually deliver the signal. */
		handle_signal(&ksig, regs);
		return;
	}

	/* Are we from a system call? */
	if (regs->regs[0]) {
		switch (regs->regs[4]) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			regs->regs[4] = regs->orig_a0;
			regs->csr_era -= 4;
			break;

		case -ERESTART_RESTARTBLOCK:
			regs->regs[4] = regs->orig_a0;
			regs->regs[11] = __NR_restart_syscall;
			regs->csr_era -= 4;
			break;
		}
		regs->regs[0] = 0;	/* Don't deal with this again. */
	}

	/*
	 * If there's no signal to deliver, we just put the saved sigmask
	 * back
	 */
	restore_saved_sigmask();
}