/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013 Imagination Technologies Ltd.
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/nmi.h>
#include <linux/personality.h>
#include <linux/prctl.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>

#include <asm/abi.h>
#include <asm/asm.h>
#include <asm/dsemul.h>
#include <asm/dsp.h>
#include <asm/exec.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/isadep.h>
#include <asm/msa.h>
#include <asm/mips-cps.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/stacktrace.h>

#ifdef CONFIG_HOTPLUG_CPU
void __noreturn arch_cpu_idle_dead(void)
{
	play_dead();
}
#endif

asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

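/*
 * Set up the user-mode register state for a newly exec'd thread: drop
 * kernel privileges and coprocessor access, discard any live FPU, MSA
 * and DSP state, and point EPC and the stack pointer at the new program.
 */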
void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
	unsigned long status;

	/* New thread loses kernel privileges. */
	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_CU2|ST0_FR|KU_MASK);
	status |= KU_USER;
	regs->cp0_status = status;
	lose_fpu(0);
	clear_thread_flag(TIF_MSA_CTX_LIVE);
	clear_used_math();
#ifdef CONFIG_MIPS_FP_SUPPORT
	atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
#endif
	init_dsp();
	regs->cp0_epc = pc;
	regs->regs[29] = sp;
}

void exit_thread(struct task_struct *tsk)
{
	/*
	 * User threads may have allocated a delay slot emulation frame.
	 * If so, clean up that allocation.
	 */
	if (!(current->flags & PF_KTHREAD))
		dsemul_thread_cleanup(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * Save any process state which is live in hardware registers to the
	 * parent context prior to duplication. This prevents the new child
	 * state becoming stale if the parent is preempted before copy_thread()
	 * gets a chance to save the parent's live hardware registers to the
	 * child context.
	 */
	preempt_disable();

	if (is_msa_enabled())
		save_msa(current);
	else if (is_fpu_owner())
		_save_fp(current);

	save_dsp(current);

	preempt_enable();

	*dst = *src;
	return 0;
}

/*
 * Copy architecture-specific thread state
 */
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long usp = args->stack;
	unsigned long tls = args->tls;
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs, *regs = current_pt_regs();
	unsigned long childksp;

	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

	/* set up new TSS. */
	childregs = (struct pt_regs *) childksp - 1;
	/* Put the stack after the struct pt_regs. */
	childksp = (unsigned long) childregs;
	p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	clear_tsk_thread_flag(p, TIF_USEDFPU);
	clear_tsk_thread_flag(p, TIF_USEDMSA);
	clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);

#ifdef CONFIG_MIPS_MT_FPAFF
	clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

	if (unlikely(args->fn)) {
		/* kernel thread */
		unsigned long status = p->thread.cp0_status;
		memset(childregs, 0, sizeof(struct pt_regs));
		p->thread.reg16 = (unsigned long)args->fn;
		p->thread.reg17 = (unsigned long)args->fn_arg;
		p->thread.reg29 = childksp;
		p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000)
		status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
			 ((status & (ST0_KUC | ST0_IEC)) << 2);
#else
		status |= ST0_EXL;
#endif
		childregs->cp0_status = status;
		return 0;
	}

	/* user thread */
	*childregs = *regs;
	childregs->regs[7] = 0;	/* Clear error flag */
	childregs->regs[2] = 0;	/* Child gets zero as return value */
	if (usp)
		childregs->regs[29] = usp;

	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

#ifdef CONFIG_MIPS_FP_SUPPORT
	atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
#endif

	if (clone_flags & CLONE_SETTLS)
		ti->tp_value = tls;

	return 0;
}

#ifdef CONFIG_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

struct mips_frame_info {
	void *func;
	unsigned long func_size;
	int frame_size;
	int pc_offset;
};

#define J_TARGET(pc,target)	\
		(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))

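/* Determine whether the instruction is a function return, i.e. "jr ra". */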
static inline int is_jr_ra_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * jr16 ra
	 * jr ra
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if (ip->mm16_r5_format.opcode == mm_pool16c_op &&
		    ip->mm16_r5_format.rt == mm_jr16_op &&
		    ip->mm16_r5_format.imm == 31)
			return 1;
		return 0;
	}

	if (ip->r_format.opcode == mm_pool32a_op &&
	    ip->r_format.func == mm_pool32axf_op &&
	    ((ip->u_format.uimmediate >> 6) & GENMASK(9, 0)) == mm_jalr_op &&
	    ip->r_format.rt == 31)
		return 1;
	return 0;
#else
	if (ip->r_format.opcode == spec_op &&
	    ip->r_format.func == jr_op &&
	    ip->r_format.rs == 31)
		return 1;
	return 0;
#endif
}

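/*
 * Determine whether the instruction saves $ra to the stack. If so, the
 * offset (in longs) at which it is stored is returned via poff.
 */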
static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * swsp ra,offset
	 * swm16 reglist,offset(sp)
	 * swm32 reglist,offset(sp)
	 * sw32 ra,offset(sp)
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is way more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		switch (ip->mm16_r5_format.opcode) {
		case mm_swsp16_op:
			if (ip->mm16_r5_format.rt != 31)
				return 0;

			*poff = ip->mm16_r5_format.imm;
			*poff = (*poff << 2) / sizeof(ulong);
			return 1;

		case mm_pool16c_op:
			switch (ip->mm16_m_format.func) {
			case mm_swm16_op:
				*poff = ip->mm16_m_format.imm;
				*poff += 1 + ip->mm16_m_format.rlist;
				*poff = (*poff << 2) / sizeof(ulong);
				return 1;

			default:
				return 0;
			}

		default:
			return 0;
		}
	}

	switch (ip->i_format.opcode) {
	case mm_sw32_op:
		if (ip->i_format.rs != 29)
			return 0;
		if (ip->i_format.rt != 31)
			return 0;

		*poff = ip->i_format.simmediate / sizeof(ulong);
		return 1;

	case mm_pool32b_op:
		switch (ip->mm_m_format.func) {
		case mm_swm32_func:
			if (ip->mm_m_format.rd < 0x10)
				return 0;
			if (ip->mm_m_format.base != 29)
				return 0;

			*poff = ip->mm_m_format.simmediate;
			*poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32);
			*poff /= sizeof(ulong);
			return 1;
		default:
			return 0;
		}

	default:
		return 0;
	}
#else
	/* sw / sd $ra, offset($sp) */
	if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
	    ip->i_format.rs == 29 && ip->i_format.rt == 31) {
		*poff = ip->i_format.simmediate / sizeof(ulong);
		return 1;
	}
#ifdef CONFIG_CPU_LOONGSON64
	if ((ip->loongson3_lswc2_format.opcode == swc2_op) &&
	    (ip->loongson3_lswc2_format.ls == 1) &&
	    (ip->loongson3_lswc2_format.fr == 0) &&
	    (ip->loongson3_lswc2_format.base == 29)) {
		if (ip->loongson3_lswc2_format.rt == 31) {
			*poff = ip->loongson3_lswc2_format.offset << 1;
			return 1;
		}
		if (ip->loongson3_lswc2_format.rq == 31) {
			*poff = (ip->loongson3_lswc2_format.offset << 1) + 1;
			return 1;
		}
	}
#endif
	return 0;
#endif
}

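/* Determine whether the instruction is a jump: j, jal, jr, jalr and variants. */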
static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * jr16,jrc,jalr16,jalrs16
	 * jal
	 * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is kind of more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if ((ip->mm16_r5_format.opcode == mm_pool16c_op &&
		    (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op))
			return 1;
		return 0;
	}

	if (ip->j_format.opcode == mm_j32_op)
		return 1;
	if (ip->j_format.opcode == mm_jal32_op)
		return 1;
	if (ip->r_format.opcode != mm_pool32a_op ||
	    ip->r_format.func != mm_pool32axf_op)
		return 0;
	return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
#else
	if (ip->j_format.opcode == j_op)
		return 1;
	if (ip->j_format.opcode == jal_op)
		return 1;
	if (ip->r_format.opcode != spec_op)
		return 0;
	return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
}

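/*
 * Determine whether the instruction moves the stack pointer to allocate a
 * stack frame. If so, the frame size in bytes is returned via frame_size.
 */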
static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
{
#ifdef CONFIG_CPU_MICROMIPS
	unsigned short tmp;

	/*
	 * addiusp -imm
	 * addius5 sp,-imm
	 * addiu32 sp,sp,-imm
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is not more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if (ip->mm16_r3_format.opcode == mm_pool16d_op &&
		    ip->mm16_r3_format.simmediate & mm_addiusp_func) {
			tmp = ip->mm_b0_format.simmediate >> 1;
			tmp = ((tmp & 0x1ff) ^ 0x100) - 0x100;
			if ((tmp + 2) < 4) /* 0x0,0x1,0x1fe,0x1ff are special */
				tmp ^= 0x100;
			*frame_size = -(signed short)(tmp << 2);
			return 1;
		}
		if (ip->mm16_r5_format.opcode == mm_pool16d_op &&
		    ip->mm16_r5_format.rt == 29) {
			tmp = ip->mm16_r5_format.imm >> 1;
			*frame_size = -(signed short)(tmp & 0xf);
			return 1;
		}
		return 0;
	}

	if (ip->mm_i_format.opcode == mm_addiu32_op &&
	    ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29) {
		*frame_size = -ip->i_format.simmediate;
		return 1;
	}
#else
	/* addiu/daddiu sp,sp,-imm */
	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
		return 0;

	if (ip->i_format.opcode == addiu_op ||
	    ip->i_format.opcode == daddiu_op) {
		*frame_size = -ip->i_format.simmediate;
		return 1;
	}
#endif
	return 0;
}

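/*
 * Analyze the function prologue described by info to determine the frame
 * size and the stack offset at which $ra is saved. Returns 0 for a nested
 * (non-leaf) function, 1 for a leaf function or -1 if the prologue can't
 * be analyzed.
 */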
static int get_frame_info(struct mips_frame_info *info)
{
	bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
	union mips_instruction insn, *ip, *ip_end;
	unsigned int last_insn_size = 0;
	bool saw_jump = false;

	info->pc_offset = -1;
	info->frame_size = 0;

	ip = (void *)msk_isa16_mode((ulong)info->func);
	if (!ip)
		goto err;

	ip_end = (void *)ip + (info->func_size ? info->func_size : 512);

	while (ip < ip_end) {
		ip = (void *)ip + last_insn_size;

		if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
			insn.word = ip->halfword[0] << 16;
			last_insn_size = 2;
		} else if (is_mmips) {
			insn.word = ip->halfword[0] << 16 | ip->halfword[1];
			last_insn_size = 4;
		} else {
			insn.word = ip->word;
			last_insn_size = 4;
		}

		if (is_jr_ra_ins(ip)) {
			break;
		} else if (!info->frame_size) {
			is_sp_move_ins(&insn, &info->frame_size);
			continue;
		} else if (!saw_jump && is_jump_ins(ip)) {
			/*
			 * If we see a jump instruction, we are finished
			 * with the frame save.
			 *
			 * Some functions can have a shortcut return at
			 * the beginning of the function, so don't start
			 * looking for a jump instruction until we see the
			 * frame setup.
			 *
			 * The RA save instruction can get put into the
			 * delay slot of the jump instruction, so look
			 * at the next instruction, too.
			 */
			saw_jump = true;
			continue;
		}
		if (info->pc_offset == -1 &&
		    is_ra_save_ins(&insn, &info->pc_offset))
			break;
		if (saw_jump)
			break;
	}
	if (info->frame_size && info->pc_offset >= 0) /* nested */
		return 0;
	if (info->pc_offset < 0) /* leaf */
		return 1;
	/* prologue seems bogus... */
err:
	return -1;
}

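/* Frame info for __schedule(), used by thread_saved_pc() and __get_wchan(). */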
static struct mips_frame_info schedule_mfi __read_mostly;

#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
	return kallsyms_lookup_name("__schedule");
}
#else
static unsigned long get___schedule_addr(void)
{
	union mips_instruction *ip = (void *)schedule;
	int max_insns = 8;
	int i;

	for (i = 0; i < max_insns; i++, ip++) {
		if (ip->j_format.opcode == j_op)
			return J_TARGET(ip, ip->j_format.target);
	}
	return 0;
}
#endif

static int __init frame_info_init(void)
{
	unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long ofs;
#endif
	unsigned long addr;

	addr = get___schedule_addr();
	if (!addr)
		addr = (unsigned long)schedule;

#ifdef CONFIG_KALLSYMS
	kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
	schedule_mfi.func = (void *)addr;
	schedule_mfi.func_size = size;

	get_frame_info(&schedule_mfi);

	/*
	 * Without schedule() frame info, the results given by
	 * thread_saved_pc() and __get_wchan() are not reliable.
	 */
	if (schedule_mfi.pc_offset < 0)
		printk("Can't analyze schedule() prologue at %p\n", schedule);

	return 0;
}

arch_initcall(frame_info_init);

/*
 * Return saved PC of a blocked thread.
 */
static unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	/* Newborn processes are a special case */
	if (t->reg31 == (unsigned long) ret_from_fork)
		return t->reg31;
	if (schedule_mfi.pc_offset < 0)
		return 0;
	return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}


#ifdef CONFIG_KALLSYMS
/* generic stack unwinding function */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
					      unsigned long *sp,
					      unsigned long pc,
					      unsigned long *ra)
{
	unsigned long low, high, irq_stack_high;
	struct mips_frame_info info;
	unsigned long size, ofs;
	struct pt_regs *regs;
	int leaf;

	if (!stack_page)
		return 0;

	/*
	 * IRQ stacks start at IRQ_STACK_START
	 * task stacks at THREAD_SIZE - 32
	 */
	low = stack_page;
	if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
		high = stack_page + IRQ_STACK_START;
		irq_stack_high = high;
	} else {
		high = stack_page + THREAD_SIZE - 32;
		irq_stack_high = 0;
	}

	/*
	 * If we reached the top of the interrupt stack, start unwinding
	 * the interrupted task stack.
	 */
	if (unlikely(*sp == irq_stack_high)) {
		unsigned long task_sp = *(unsigned long *)*sp;

		/*
		 * Check that the pointer saved in the IRQ stack head points to
		 * something within the stack of the current task
		 */
		if (!object_is_on_stack((void *)task_sp))
			return 0;

		/*
		 * Follow the pointer to the task's kernel stack frame where
		 * the interrupted state was saved.
		 */
		regs = (struct pt_regs *)task_sp;
		pc = regs->cp0_epc;
		if (!user_mode(regs) && __kernel_text_address(pc)) {
			*sp = regs->regs[29];
			*ra = regs->regs[31];
			return pc;
		}
		return 0;
	}
	if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
		return 0;
	/*
	 * Return ra if an exception occurred at the first instruction
	 */
	if (unlikely(ofs == 0)) {
		pc = *ra;
		*ra = 0;
		return pc;
	}

	info.func = (void *)(pc - ofs);
	info.func_size = ofs;	/* analyze from start to ofs */
	leaf = get_frame_info(&info);
	if (leaf < 0)
		return 0;

	if (*sp < low || *sp + info.frame_size > high)
		return 0;

	if (leaf)
		/*
		 * In some extreme cases, get_frame_info() can wrongly
		 * consider a nested function to be a leaf one. In those
		 * cases, avoid always returning the same value.
		 */
		pc = pc != *ra ? *ra : 0;
	else
		pc = ((unsigned long *)(*sp))[info.pc_offset];

	*sp += info.frame_size;
	*ra = 0;
	return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);

/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
			   unsigned long pc, unsigned long *ra)
{
	unsigned long stack_page = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		if (on_irq_stack(cpu, *sp)) {
			stack_page = (unsigned long)irq_stack[cpu];
			break;
		}
	}

	if (!stack_page)
		stack_page = (unsigned long)task_stack_page(task);

	return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif

/*
 * __get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long __get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long sp;
	unsigned long ra = 0;
#endif

	if (!task_stack_page(task))
		goto out;

	pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
	sp = task->thread.reg29 + schedule_mfi.frame_size;

	while (in_sched_functions(pc))
		pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
	return pc;
}

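/*
 * Determine the highest address usable as the top of the user stack,
 * leaving room below TASK_SIZE for the VDSO and related mappings.
 */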
unsigned long mips_stack_top(void)
{
	unsigned long top = TASK_SIZE & PAGE_MASK;

	if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
		/* One page for branch delay slot "emulation" */
		top -= PAGE_SIZE;
	}

	/* Space for the VDSO, data page & GIC user page */
	top -= PAGE_ALIGN(current->thread.abi->vdso->size);
	top -= PAGE_SIZE;
	top -= mips_gic_present() ? PAGE_SIZE : 0;

	/* Space for cache colour alignment */
	if (cpu_has_dc_aliases)
		top -= shm_align_mask + 1;

	/* Space to randomize the VDSO base */
	if (current->flags & PF_RANDOMIZE)
		top -= VDSO_RANDOMIZE_SIZE;

	return top;
}

/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABIs and on a 16-byte boundary for the
 * 64-bit ABIs.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_u32_below(PAGE_SIZE);

	return sp & ALMASK;
}

static struct cpumask backtrace_csd_busy;

static void handle_backtrace(void *info)
{
	nmi_cpu_backtrace(get_irq_regs());
	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
}

static DEFINE_PER_CPU(call_single_data_t, backtrace_csd) =
	CSD_INIT(handle_backtrace, NULL);

static void raise_backtrace(cpumask_t *mask)
{
	call_single_data_t *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		/*
		 * If we previously sent an IPI to the target CPU & it hasn't
		 * cleared its bit in the busy cpumask then it didn't handle
		 * our previous IPI & it's not safe for us to reuse the
		 * call_single_data_t.
		 */
		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
				cpu);
			continue;
		}

		csd = &per_cpu(backtrace_csd, cpu);
		smp_call_function_single_async(cpu, csd);
	}
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_backtrace);
}

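/* Report the task's current FP mode, as queried by prctl(PR_GET_FP_MODE). */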
int mips_get_process_fp_mode(struct task_struct *task)
{
	int value = 0;

	if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
		value |= PR_FP_MODE_FR;
	if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
		value |= PR_FP_MODE_FRE;

	return value;
}

static long prepare_for_fp_mode_switch(void *unused)
{
	/*
	 * This is icky, but we use this to simply ensure that all CPUs have
	 * context switched, regardless of whether they were previously running
	 * kernel or user code. This ensures that no CPU that a mode-switching
	 * program may execute on keeps its FPU enabled (& in the old mode)
	 * throughout the mode switch.
	 */
	return 0;
}

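/*
 * Switch the process to the FP mode given by value (a mask of PR_FP_MODE_FR
 * and PR_FP_MODE_FRE), as requested by prctl(PR_SET_FP_MODE).
 */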
int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
	const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
	struct task_struct *t;
	struct cpumask process_cpus;
	int cpu;

	/* If nothing to change, return right away, successfully. */
	if (value == mips_get_process_fp_mode(task))
		return 0;

	/* Only accept a mode change if 64-bit FP enabled for o32. */
	if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
		return -EOPNOTSUPP;

	/* And only for o32 tasks. */
	if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS))
		return -EOPNOTSUPP;

	/* Check the value is valid */
	if (value & ~known_bits)
		return -EOPNOTSUPP;

	/* Setting FRE without FR is not supported. */
	if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
		return -EOPNOTSUPP;

	/* Avoid inadvertently triggering emulation */
	if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
	    !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
		return -EOPNOTSUPP;
	if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
		return -EOPNOTSUPP;

	/* FR = 0 not supported in MIPS R6 */
	if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
		return -EOPNOTSUPP;

	/* Indicate the new FP mode in each thread */
	for_each_thread(task, t) {
		/* Update desired FP register width */
		if (value & PR_FP_MODE_FR) {
			clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
		} else {
			set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
			clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
		}

		/* Update desired FP single layout */
		if (value & PR_FP_MODE_FRE)
			set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
		else
			clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
	}

	/*
	 * We need to ensure that all threads in the process have switched mode
	 * before returning, in order to allow userland to not worry about
	 * races. We can do this by forcing all CPUs that any thread in the
	 * process may be running on to schedule something else - in this case
	 * prepare_for_fp_mode_switch().
	 *
	 * We begin by generating a mask of all CPUs that any thread in the
	 * process may be running on.
	 */
	cpumask_clear(&process_cpus);
	for_each_thread(task, t)
		cpumask_set_cpu(task_cpu(t), &process_cpus);

	/*
	 * Now we schedule prepare_for_fp_mode_switch() on each of those CPUs.
	 *
	 * The CPUs may have rescheduled already since we switched mode or
	 * generated the cpumask, but that doesn't matter. If the task in this
	 * process is scheduled out then our scheduling
	 * prepare_for_fp_mode_switch() will simply be redundant. If it's
	 * scheduled in then it will already have picked up the new FP mode
	 * whilst doing so.
	 */
	cpus_read_lock();
	for_each_cpu_and(cpu, &process_cpus, cpu_online_mask)
		work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL);
	cpus_read_unlock();

	return 0;
}

#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
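/* Copy the GP & CP0 register state into uregs using the MIPS32_EF_* layout. */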
void mips_dump_regs32(u32 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
		/* k0/k1 are copied as zero. */
		if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
			uregs[i] = 0;
		else
			uregs[i] = regs->regs[i - MIPS32_EF_R0];
	}

	uregs[MIPS32_EF_LO] = regs->lo;
	uregs[MIPS32_EF_HI] = regs->hi;
	uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */

#ifdef CONFIG_64BIT
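/* As mips_dump_regs32(), but using the 64-bit MIPS64_EF_* register layout. */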
void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
		/* k0/k1 are copied as zero. */
		if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
			uregs[i] = 0;
		else
			uregs[i] = regs->regs[i - MIPS64_EF_R0];
	}

	uregs[MIPS64_EF_LO] = regs->lo;
	uregs[MIPS64_EF_HI] = regs->hi;
	uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_64BIT */