/* xref: /linux/arch/mips/kernel/signal.c (revision 0b8061c340b643e01da431dd60c75a41bb1d31ec) */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 1994 - 2000  Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#include <linux/cache.h>
#include <linux/context_tracking.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/uprobes.h>
#include <linux/compiler.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>

#include <asm/abi.h>
#include <asm/asm.h>
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/fpu.h>
#include <asm/sim.h>
#include <asm/ucontext.h>
#include <asm/cpu-features.h>
#include <asm/war.h>
#include <asm/dsp.h>
#include <asm/inst.h>
#include <asm/msa.h>

#include "signal-common.h"

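/*
 * Set once at boot by signal_setup(): the assembly _{save,restore}_fp_context
 * wrappers when the CPU has an FPU, or the software
 * copy_fp_{to,from}_sigcontext fallbacks otherwise.
 */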
static int (*save_fp_context)(void __user *sc);
static int (*restore_fp_context)(void __user *sc);

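/* The classic (non-RT) signal frame, laid out on the user stack. */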
struct sigframe {
	u32 sf_ass[4];		/* argument save space for o32 */
	u32 sf_pad[2];		/* Was: signal trampoline */

	/* Matches struct ucontext from its uc_mcontext field onwards */
	struct sigcontext sf_sc;
	sigset_t sf_mask;
	unsigned long long sf_extcontext[];
};

struct rt_sigframe {
	u32 rs_ass[4];		/* argument save space for o32 */
	u32 rs_pad[2];		/* Was: signal trampoline */
	struct siginfo rs_info;
	struct ucontext rs_uc;
};

#ifdef CONFIG_MIPS_FP_SUPPORT

/*
 * Thread saved context copy to/from a signal context presumed to be on the
 * user stack, and therefore accessed with appropriate macros from uaccess.h.
 */
static int copy_fp_to_sigcontext(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
	int i;
	int err = 0;
	int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;

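	/*
	 * In the 32-bit FP register model (FR=0) each pair of 32-bit
	 * registers is held in one even-indexed 64-bit fpr, so step
	 * over the odd indices.
	 */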
	for (i = 0; i < NUM_FPU_REGS; i += inc) {
		err |=
		    __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
			       &fpregs[i]);
	}
	err |= __put_user(current->thread.fpu.fcr31, csr);

	return err;
}

static int copy_fp_from_sigcontext(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
	int i;
	int err = 0;
	int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
	u64 fpr_val;

	for (i = 0; i < NUM_FPU_REGS; i += inc) {
		err |= __get_user(fpr_val, &fpregs[i]);
		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
	}
	err |= __get_user(current->thread.fpu.fcr31, csr);

	return err;
}

#else /* !CONFIG_MIPS_FP_SUPPORT */

static int copy_fp_to_sigcontext(void __user *sc)
{
	return 0;
}

static int copy_fp_from_sigcontext(void __user *sc)
{
	return 0;
}

#endif /* !CONFIG_MIPS_FP_SUPPORT */

/*
 * Wrappers for the assembly _{save,restore}_fp_context functions.
 */
static int save_hw_fp_context(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;

	return _save_fp_context(fpregs, csr);
}

static int restore_hw_fp_context(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;

	return _restore_fp_context(fpregs, csr);
}

/*
 * Extended context handling.
 */

static inline void __user *sc_to_extcontext(void __user *sc)
{
	struct ucontext __user *uc;

	/*
	 * We can just pretend the sigcontext is always embedded in a struct
	 * ucontext here, because the offset from sigcontext to extended
	 * context is the same in the struct sigframe case.
	 */
	uc = container_of(sc, struct ucontext, uc_mcontext);
	return &uc->uc_extcontext;
}

#ifdef CONFIG_CPU_HAS_MSA

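/*
 * Save live MSA vector state to the extended context area @buf. Returns the
 * number of bytes written, 0 if there was no live MSA context to save, or
 * -EFAULT if the user memory is inaccessible.
 */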
static int save_msa_extcontext(void __user *buf)
{
	struct msa_extcontext __user *msa = buf;
	uint64_t val;
	int i, err;

	if (!thread_msa_context_live())
		return 0;

	/*
	 * Ensure that we can't lose the live MSA context between checking
	 * for it & writing it to memory.
	 */
	preempt_disable();

	if (is_msa_enabled()) {
		/*
		 * There are no EVA versions of the vector register load/store
		 * instructions, so MSA context has to be saved to kernel memory
		 * and then copied to user memory. The save to kernel memory
		 * should already have been done when handling scalar FP
		 * context.
		 */
		BUG_ON(IS_ENABLED(CONFIG_EVA));

		err = __put_user(read_msa_csr(), &msa->csr);
		err |= _save_msa_all_upper(&msa->wr);

		preempt_enable();
	} else {
		preempt_enable();

		err = __put_user(current->thread.fpu.msacsr, &msa->csr);

		for (i = 0; i < NUM_FPU_REGS; i++) {
			val = get_fpr64(&current->thread.fpu.fpr[i], 1);
			err |= __put_user(val, &msa->wr[i]);
		}
	}

	err |= __put_user(MSA_EXTCONTEXT_MAGIC, &msa->ext.magic);
	err |= __put_user(sizeof(*msa), &msa->ext.size);

	return err ? -EFAULT : sizeof(*msa);
}

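/*
 * Restore MSA vector state from the @size byte record at @buf, either
 * directly to the registers when MSA is live or to the thread's saved
 * context otherwise.
 */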
static int restore_msa_extcontext(void __user *buf, unsigned int size)
{
	struct msa_extcontext __user *msa = buf;
	unsigned long long val;
	unsigned int csr;
	int i, err;

	if (size != sizeof(*msa))
		return -EINVAL;

	err = get_user(csr, &msa->csr);
	if (err)
		return err;

	preempt_disable();

	if (is_msa_enabled()) {
		/*
		 * There are no EVA versions of the vector register load/store
		 * instructions, so MSA context has to be copied to kernel
		 * memory and later loaded to registers. The same is true of
		 * scalar FP context, so FPU & MSA should have already been
		 * disabled whilst handling scalar FP context.
		 */
		BUG_ON(IS_ENABLED(CONFIG_EVA));

		write_msa_csr(csr);
		err |= _restore_msa_all_upper(&msa->wr);
		preempt_enable();
	} else {
		preempt_enable();

		current->thread.fpu.msacsr = csr;

		for (i = 0; i < NUM_FPU_REGS; i++) {
			err |= __get_user(val, &msa->wr[i]);
			set_fpr64(&current->thread.fpu.fpr[i], 1, val);
		}
	}

	return err;
}

#else /* !CONFIG_CPU_HAS_MSA */

static int save_msa_extcontext(void __user *buf)
{
	return 0;
}

static int restore_msa_extcontext(void __user *buf, unsigned int size)
{
	return SIGSYS;
}

#endif /* !CONFIG_CPU_HAS_MSA */

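/*
 * Write out all extended context records to @buf, terminated by an
 * END_EXTCONTEXT_MAGIC marker. Returns the total size written, zero if
 * there was nothing to save, or a negative error code on fault.
 */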
static int save_extcontext(void __user *buf)
{
	int sz;

	sz = save_msa_extcontext(buf);
	if (sz < 0)
		return sz;
	buf += sz;

	/* If no context was saved then trivially return */
	if (!sz)
		return 0;

	/* Write the end marker */
	if (__put_user(END_EXTCONTEXT_MAGIC, (u32 *)buf))
		return -EFAULT;

	sz += sizeof(((struct extcontext *)NULL)->magic);
	return sz;
}

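/*
 * Walk the sequence of extended context records at @buf, dispatching
 * each one by its magic value, until END_EXTCONTEXT_MAGIC is reached.
 */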
static int restore_extcontext(void __user *buf)
{
	struct extcontext ext;
	int err;

	while (1) {
		err = __get_user(ext.magic, (unsigned int *)buf);
		if (err)
			return err;

		if (ext.magic == END_EXTCONTEXT_MAGIC)
			return 0;

		err = __get_user(ext.size, (unsigned int *)(buf
			+ offsetof(struct extcontext, size)));
		if (err)
			return err;

		switch (ext.magic) {
		case MSA_EXTCONTEXT_MAGIC:
			err = restore_msa_extcontext(buf, ext.size);
			break;

		default:
			err = -EINVAL;
			break;
		}

		if (err)
			return err;

		buf += ext.size;
	}
}

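/*
 * For illustration only (not part of the exported ABI headers), userland
 * could walk the same record sequence from uc_extcontext in much the same
 * way, e.g.:
 *
 *	uint32_t *ext = (uint32_t *)&uc->uc_extcontext;
 *
 *	while (ext[0] != END_EXTCONTEXT_MAGIC) {
 *		if (ext[0] == MSA_EXTCONTEXT_MAGIC)
 *			handle_msa_record((struct msa_extcontext *)ext);
 *		ext = (uint32_t *)((char *)ext + ext[1]);
 *	}
 *
 * where ext[0] is each record's magic, ext[1] its size field, and
 * handle_msa_record() is a hypothetical consumer.
 */
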
/*
 * Helper routines
 */
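/*
 * Save FP and extended context to the sigcontext @sc. If the save faults,
 * the page holding the sigcontext has likely been paged out; the
 * __put_user() calls below touch it back in before retrying, and only a
 * genuinely bad user address makes us give up.
 */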
int protected_save_fp_context(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
	uint32_t __user *used_math = sc + abi->off_sc_used_math;
	unsigned int used, ext_sz;
	int err;

	used = used_math() ? USED_FP : 0;
	if (!used)
		goto fp_done;

	if (!test_thread_flag(TIF_32BIT_FPREGS))
		used |= USED_FR1;
	if (test_thread_flag(TIF_HYBRID_FPREGS))
		used |= USED_HYBRID_FPRS;

	/*
	 * EVA does not have userland equivalents of ldc1 or sdc1, so
	 * save to the kernel FP context & copy that to userland below.
	 */
	if (IS_ENABLED(CONFIG_EVA))
		lose_fpu(1);

	while (1) {
		lock_fpu_owner();
		if (is_fpu_owner()) {
			err = save_fp_context(sc);
			unlock_fpu_owner();
		} else {
			unlock_fpu_owner();
			err = copy_fp_to_sigcontext(sc);
		}
		if (likely(!err))
			break;
		/* touch the sigcontext and try again */
		err = __put_user(0, &fpregs[0]) |
			__put_user(0, &fpregs[31]) |
			__put_user(0, csr);
		if (err)
			return err;	/* really bad sigcontext */
	}

fp_done:
	ext_sz = err = save_extcontext(sc_to_extcontext(sc));
	if (err < 0)
		return err;
	used |= ext_sz ? USED_EXTCONTEXT : 0;

	return __put_user(used, used_math);
}

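/*
 * Counterpart of protected_save_fp_context(): restore FP and extended
 * context from @sc, using the same touch-and-retry scheme to cope with
 * the sigcontext page having been paged out.
 */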
int protected_restore_fp_context(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
	uint32_t __user *used_math = sc + abi->off_sc_used_math;
	unsigned int used;
	int err, sig = 0, tmp __maybe_unused;

	err = __get_user(used, used_math);
	conditional_used_math(used & USED_FP);

	/*
	 * The signal handler may have used FPU; give it up if the program
	 * doesn't want it following sigreturn.
	 */
	if (err || !(used & USED_FP))
		lose_fpu(0);
	if (err)
		return err;
	if (!(used & USED_FP))
		goto fp_done;

	err = sig = fpcsr_pending(csr);
	if (err < 0)
		return err;

	/*
	 * EVA does not have userland equivalents of ldc1 or sdc1, so we
	 * disable the FPU here such that the code below simply copies to
	 * the kernel FP context.
	 */
	if (IS_ENABLED(CONFIG_EVA))
		lose_fpu(0);

	while (1) {
		lock_fpu_owner();
		if (is_fpu_owner()) {
			err = restore_fp_context(sc);
			unlock_fpu_owner();
		} else {
			unlock_fpu_owner();
			err = copy_fp_from_sigcontext(sc);
		}
		if (likely(!err))
			break;
		/* touch the sigcontext and try again */
		err = __get_user(tmp, &fpregs[0]) |
			__get_user(tmp, &fpregs[31]) |
			__get_user(tmp, csr);
		if (err)
			break;	/* really bad sigcontext */
	}

fp_done:
	if (!err && (used & USED_EXTCONTEXT))
		err = restore_extcontext(sc_to_extcontext(sc));

	return err ?: sig;
}

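/*
 * Save the general purpose registers, DSP state where present, and FP
 * context to the sigcontext @sc on the user stack.
 */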
int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	int err = 0;
	int i;

	err |= __put_user(regs->cp0_epc, &sc->sc_pc);

	err |= __put_user(0, &sc->sc_regs[0]);
	for (i = 1; i < 32; i++)
		err |= __put_user(regs->regs[i], &sc->sc_regs[i]);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	err |= __put_user(regs->acx, &sc->sc_acx);
#endif
	err |= __put_user(regs->hi, &sc->sc_mdhi);
	err |= __put_user(regs->lo, &sc->sc_mdlo);
	if (cpu_has_dsp) {
		err |= __put_user(mfhi1(), &sc->sc_hi1);
		err |= __put_user(mflo1(), &sc->sc_lo1);
		err |= __put_user(mfhi2(), &sc->sc_hi2);
		err |= __put_user(mflo2(), &sc->sc_lo2);
		err |= __put_user(mfhi3(), &sc->sc_hi3);
		err |= __put_user(mflo3(), &sc->sc_lo3);
		err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
	}

	/*
	 * Save FPU state to signal context. Signal handler
	 * will "inherit" current FPU state.
	 */
	err |= protected_save_fp_context(sc);

	return err;
}

static size_t extcontext_max_size(void)
{
	size_t sz = 0;

	/*
	 * The assumption here is that between this point & the point at which
	 * the extended context is saved the size of the context should only
	 * ever be able to shrink (if the task is preempted), but never grow.
	 * That is, what this function returns is an upper bound on the size of
	 * the extended context for the current task at the current time.
	 */

	if (thread_msa_context_live())
		sz += sizeof(struct msa_extcontext);

	/* If any context is saved then we'll append the end marker */
	if (sz)
		sz += sizeof(((struct extcontext *)NULL)->magic);

	return sz;
}

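/*
 * Check the saved FCSR for pending exception causes whose enables are set
 * (each cause bit sits 5 bits above its enable bit, hence the shift), plus
 * the unimplemented-operation cause which has no enable. Such exceptions
 * would fire as soon as the FCSR was restored, so clear them and report
 * SIGFPE instead.
 */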
int fpcsr_pending(unsigned int __user *fpcsr)
{
	int err, sig = 0;
	unsigned int csr, enabled;

	err = __get_user(csr, fpcsr);
	enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5);
	/*
	 * If the signal handler set some FPU exceptions, clear them and
	 * send SIGFPE.
	 */
	if (csr & enabled) {
		csr &= ~enabled;
		err |= __put_user(csr, fpcsr);
		sig = SIGFPE;
	}
	return err ?: sig;
}

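/*
 * Restore register state from the sigcontext @sc. Returns a negative error
 * code on a bad sigcontext, a signal number to be forced (SIGFPE from
 * fpcsr_pending()), or zero on success.
 */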
int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	unsigned long treg;
	int err = 0;
	int i;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	err |= __get_user(regs->cp0_epc, &sc->sc_pc);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	err |= __get_user(regs->acx, &sc->sc_acx);
#endif
	err |= __get_user(regs->hi, &sc->sc_mdhi);
	err |= __get_user(regs->lo, &sc->sc_mdlo);
	if (cpu_has_dsp) {
		err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
		err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
		err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
		err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
		err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
		err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
		err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
	}

	for (i = 1; i < 32; i++)
		err |= __get_user(regs->regs[i], &sc->sc_regs[i]);

	return err ?: protected_restore_fp_context(sc);
}

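/*
 * Mask used to align the signal frame: to a whole icache line where the
 * icache refill workaround is required, or to the usual ABI stack
 * alignment (ALMASK) otherwise.
 */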
#ifdef CONFIG_WAR_ICACHE_REFILLS
#define SIGMASK		~(cpu_icache_line_size()-1)
#else
#define SIGMASK		ALMASK
#endif

void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
			  size_t frame_size)
{
	unsigned long sp;

	/* Leave space for potential extended context */
	frame_size += extcontext_max_size();

	/* Default to using normal stack */
	sp = regs->regs[29];

	/*
	 * The FPU emulator may have its own trampoline active just
	 * above the user stack, 16 bytes before the next lowest
	 * 16-byte boundary.  Try to avoid trashing it.
	 */
	sp -= 32;

	sp = sigsp(sp, ksig);

	return (void __user *)((sp - frame_size) & SIGMASK);
}

/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */

#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE1(sigsuspend, sigset_t __user *, uset)
{
	return sys_rt_sigsuspend(uset, sizeof(sigset_t));
}
#endif

#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
	struct sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	int err = 0;

	if (act) {
		old_sigset_t mask;

		if (!access_ok(act, sizeof(*act)))
			return -EFAULT;
		err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler);
		err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		err |= __get_user(mask, &act->sa_mask.sig[0]);
		if (err)
			return -EFAULT;

		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)))
			return -EFAULT;
		err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
		err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
		err |= __put_user(0, &oact->sa_mask.sig[1]);
		err |= __put_user(0, &oact->sa_mask.sig[2]);
		err |= __put_user(0, &oact->sa_mask.sig[3]);
		if (err)
			return -EFAULT;
	}

	return ret;
}
#endif

#ifdef CONFIG_TRAD_SIGNALS
asmlinkage void sys_sigreturn(void)
{
	struct sigframe __user *frame;
	struct pt_regs *regs;
	sigset_t blocked;
	int sig;

	regs = current_pt_regs();
	frame = (struct sigframe __user *)regs->regs[29];
	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
		goto badframe;

	set_current_blocked(&blocked);

	sig = restore_sigcontext(regs, &frame->sf_sc);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig);

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		: /* no outputs */
		: "r" (regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV);
}
#endif /* CONFIG_TRAD_SIGNALS */

asmlinkage void sys_rt_sigreturn(void)
{
	struct rt_sigframe __user *frame;
	struct pt_regs *regs;
	sigset_t set;
	int sig;

	regs = current_pt_regs();
	frame = (struct rt_sigframe __user *)regs->regs[29];
	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
		goto badframe;

	set_current_blocked(&set);

	sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig);

	if (restore_altstack(&frame->rs_uc.uc_stack))
		goto badframe;

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		: /* no outputs */
		: "r" (regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV);
}

#ifdef CONFIG_TRAD_SIGNALS
static int setup_frame(void *sig_return, struct ksignal *ksig,
		       struct pt_regs *regs, sigset_t *set)
{
	struct sigframe __user *frame;
	int err = 0;

	frame = get_sigframe(ksig, regs, sizeof(*frame));
	if (!access_ok(frame, sizeof(*frame)))
		return -EFAULT;

	err |= setup_sigcontext(regs, &frame->sf_sc);
	err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
	if (err)
		return -EFAULT;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = 0 (should be cause)
	 *   a2 = pointer to struct sigcontext
	 *
	 * $25 and c0_epc point to the signal handler, $29 points to the
	 * struct sigframe.
	 */
	regs->regs[ 4] = ksig->sig;
	regs->regs[ 5] = 0;
	regs->regs[ 6] = (unsigned long) &frame->sf_sc;
	regs->regs[29] = (unsigned long) frame;
	regs->regs[31] = (unsigned long) sig_return;
	regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->cp0_epc, regs->regs[31]);
	return 0;
}
#endif

static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
			  struct pt_regs *regs, sigset_t *set)
{
	struct rt_sigframe __user *frame;
	int err = 0;

	frame = get_sigframe(ksig, regs, sizeof(*frame));
	if (!access_ok(frame, sizeof(*frame)))
		return -EFAULT;

	/* Create siginfo.  */
	err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info);

	/* Create the ucontext. */
	err |= __put_user(0, &frame->rs_uc.uc_flags);
	err |= __put_user(NULL, &frame->rs_uc.uc_link);
	err |= __save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
	err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
	err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));

	if (err)
		return -EFAULT;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = 0 (should be cause)
	 *   a2 = pointer to ucontext
	 *
	 * $25 and c0_epc point to the signal handler, $29 points to
	 * the struct rt_sigframe.
	 */
	regs->regs[ 4] = ksig->sig;
	regs->regs[ 5] = (unsigned long) &frame->rs_info;
	regs->regs[ 6] = (unsigned long) &frame->rs_uc;
	regs->regs[29] = (unsigned long) frame;
	regs->regs[31] = (unsigned long) sig_return;
	regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->cp0_epc, regs->regs[31]);

	return 0;
}

struct mips_abi mips_abi = {
#ifdef CONFIG_TRAD_SIGNALS
	.setup_frame	= setup_frame,
#endif
	.setup_rt_frame = setup_rt_frame,
	.restart	= __NR_restart_syscall,

	.off_sc_fpregs = offsetof(struct sigcontext, sc_fpregs),
	.off_sc_fpc_csr = offsetof(struct sigcontext, sc_fpc_csr),
	.off_sc_used_math = offsetof(struct sigcontext, sc_used_math),

	.vdso		= &vdso_image,
};

static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int ret;
	struct mips_abi *abi = current->thread.abi;
	void *vdso = current->mm->context.vdso;

	/*
	 * If we were emulating a delay slot instruction, exit that frame such
	 * that addresses in the sigframe are as expected for userland and we
	 * don't have a problem if we reuse the thread's frame for an
	 * instruction within the signal handler.
	 */
	dsemul_thread_rollback(regs);

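	/*
	 * regs[0] is non-zero only on the way out of a syscall; the entry
	 * code stashed the syscall number there and the original a3 in the
	 * otherwise unused regs[26] slot so the syscall can be restarted.
	 */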
	if (regs->regs[0]) {
		switch (regs->regs[2]) {
		case ERESTART_RESTARTBLOCK:
		case ERESTARTNOHAND:
			regs->regs[2] = EINTR;
			break;
		case ERESTARTSYS:
			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
				regs->regs[2] = EINTR;
				break;
			}
			fallthrough;
		case ERESTARTNOINTR:
			regs->regs[7] = regs->regs[26];
			regs->regs[2] = regs->regs[0];
			regs->cp0_epc -= 4;
		}

		regs->regs[0] = 0;		/* Don't deal with this again.	*/
	}

	rseq_signal_deliver(ksig, regs);

	if (sig_uses_siginfo(&ksig->ka, abi))
		ret = abi->setup_rt_frame(vdso + abi->vdso->off_rt_sigreturn,
					  ksig, regs, oldset);
	else
		ret = abi->setup_frame(vdso + abi->vdso->off_sigreturn,
				       ksig, regs, oldset);

	signal_setup_done(ret, ksig, 0);
}

static void do_signal(struct pt_regs *regs)
{
	struct ksignal ksig;

	if (get_signal(&ksig)) {
		/* Whee!  Actually deliver the signal.	*/
		handle_signal(&ksig, regs);
		return;
	}

	if (regs->regs[0]) {
		switch (regs->regs[2]) {
		case ERESTARTNOHAND:
		case ERESTARTSYS:
		case ERESTARTNOINTR:
			regs->regs[2] = regs->regs[0];
			regs->regs[7] = regs->regs[26];
			regs->cp0_epc -= 4;
			break;

		case ERESTART_RESTARTBLOCK:
			regs->regs[2] = current->thread.abi->restart;
			regs->regs[7] = regs->regs[26];
			regs->cp0_epc -= 4;
			break;
		}
		regs->regs[0] = 0;	/* Don't deal with this again.	*/
	}

	/*
	 * If there's no signal to deliver, we just put the saved sigmask
	 * back
	 */
	restore_saved_sigmask();
}

/*
 * notification of userspace execution resumption
 * - triggered by the TIF_WORK_MASK flags
 */
asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
	__u32 thread_info_flags)
{
	local_irq_enable();

	user_exit();

	if (thread_info_flags & _TIF_UPROBE)
		uprobe_notify_resume(regs);

	/* deal with pending signal delivery */
	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
		do_signal(regs);

	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		tracehook_notify_resume(regs);
		rseq_handle_notify_resume(NULL, regs);
	}

	user_enter();
}

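/*
 * On SMP not every CPU need have an FPU, so rather than selecting a single
 * implementation at boot, choose between the hardware and software paths
 * each time the context is saved or restored.
 */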
#if defined(CONFIG_SMP) && defined(CONFIG_MIPS_FP_SUPPORT)
static int smp_save_fp_context(void __user *sc)
{
	return raw_cpu_has_fpu
	       ? save_hw_fp_context(sc)
	       : copy_fp_to_sigcontext(sc);
}

static int smp_restore_fp_context(void __user *sc)
{
	return raw_cpu_has_fpu
	       ? restore_hw_fp_context(sc)
	       : copy_fp_from_sigcontext(sc);
}
#endif

static int signal_setup(void)
{
	/*
	 * The offset from sigcontext to extended context should be the same
	 * regardless of the type of signal, such that userland can always know
	 * where to look if it wishes to find the extended context structures.
	 */
	BUILD_BUG_ON((offsetof(struct sigframe, sf_extcontext) -
		      offsetof(struct sigframe, sf_sc)) !=
		     (offsetof(struct rt_sigframe, rs_uc.uc_extcontext) -
		      offsetof(struct rt_sigframe, rs_uc.uc_mcontext)));

#if defined(CONFIG_SMP) && defined(CONFIG_MIPS_FP_SUPPORT)
	/* For now just do the cpu_has_fpu check when the functions are invoked */
	save_fp_context = smp_save_fp_context;
	restore_fp_context = smp_restore_fp_context;
#else
	if (cpu_has_fpu) {
		save_fp_context = save_hw_fp_context;
		restore_fp_context = restore_hw_fp_context;
	} else {
		save_fp_context = copy_fp_to_sigcontext;
		restore_fp_context = copy_fp_from_sigcontext;
	}
#endif /* CONFIG_SMP && CONFIG_MIPS_FP_SUPPORT */

	return 0;
}

arch_initcall(signal_setup);
963