/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/user.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>

#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/pgtable.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/elf.h>
#include <asm/isadep.h>
#include <asm/inst.h>
#include <asm/stacktrace.h>

/*
 * The idle thread. There's no useful work to be done, so just try to conserve
 * power and have a low exit latency (ie sit in a loop waiting for somebody to
 * say that they'd like to reschedule)
 */
void __noreturn cpu_idle(void)
{
	int cpu;

	/* CPU is going idle. */
	cpu = smp_processor_id();

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_idle_enter();
		rcu_idle_enter();
		while (!need_resched() && cpu_online(cpu)) {
#ifdef CONFIG_MIPS_MT_SMTC
			extern void smtc_idle_loop_hook(void);

			smtc_idle_loop_hook();
#endif

			if (cpu_wait) {
				/* Don't trace irqs off for idle */
				stop_critical_timings();
				(*cpu_wait)();
				start_critical_timings();
			}
		}
#ifdef CONFIG_HOTPLUG_CPU
		if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map) &&
		    (system_state == SYSTEM_RUNNING ||
		     system_state == SYSTEM_BOOTING))
			play_dead();
#endif
		rcu_idle_exit();
		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}
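/*
 * Editor's aside (an assumption about the surrounding kernel, not part
 * of this file): cpu_wait is a platform-selected function pointer; on
 * most CPUs it points at a tiny routine (r4k_wait, for example) that
 * executes the MIPS "wait" instruction to halt the pipeline until the
 * next interrupt.  When no such routine is installed, the idle loop
 * above simply spins on need_resched().
 */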
asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
	unsigned long status;

	/* New thread loses kernel privileges. */
	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
#ifdef CONFIG_64BIT
	status |= test_thread_flag(TIF_32BIT_REGS) ? 0 : ST0_FR;
#endif
	status |= KU_USER;
	regs->cp0_status = status;
	clear_used_math();
	clear_fpu_owner();
	if (cpu_has_dsp)
		__init_dsp();
	regs->cp0_epc = pc;
	regs->regs[29] = sp;
}

void exit_thread(void)
{
}

void flush_thread(void)
{
}

int copy_thread(unsigned long clone_flags, unsigned long usp,
	unsigned long arg, struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs, *regs = current_pt_regs();
	unsigned long childksp;
	p->set_child_tid = p->clear_child_tid = NULL;

	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

	preempt_disable();

	if (is_fpu_owner())
		save_fp(p);

	if (cpu_has_dsp)
		save_dsp(p);

	preempt_enable();

	/* Set up the child's pt_regs at the top of its kernel stack ("TSS"). */
	childregs = (struct pt_regs *) childksp - 1;
	/* Put the stack after the struct pt_regs. */
	childksp = (unsigned long) childregs;
	p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
	if (unlikely(p->flags & PF_KTHREAD)) {
		unsigned long status = p->thread.cp0_status;
		memset(childregs, 0, sizeof(struct pt_regs));
		ti->addr_limit = KERNEL_DS;
		p->thread.reg16 = usp; /* fn */
		p->thread.reg17 = arg;
		p->thread.reg29 = childksp;
		p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
			 ((status & (ST0_KUC | ST0_IEC)) << 2);
#else
		status |= ST0_EXL;
#endif
		childregs->cp0_status = status;
		return 0;
	}
	*childregs = *regs;
	childregs->regs[7] = 0;	/* Clear error flag */
	childregs->regs[2] = 0;	/* Child gets zero as return value */
	childregs->regs[29] = usp;
	ti->addr_limit = USER_DS;

	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC restores TCStatus after Status, and the CU bits
	 * are aliased there.
	 */
	childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1);
#endif
	clear_tsk_thread_flag(p, TIF_USEDFPU);

#ifdef CONFIG_MIPS_MT_FPAFF
	clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

	if (clone_flags & CLONE_SETTLS)
		ti->tp_value = regs->regs[7];

	return 0;
}
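/*
 * How the two copy_thread() paths resume (a sketch inferred from the
 * register assignments above, not from any particular switch_to()
 * implementation): the context switch loads p->thread.reg29 into $sp
 * and jumps to p->thread.reg31.  A kernel thread therefore lands in
 * ret_from_kernel_thread, which calls the function stashed in reg16
 * ($s0) with the argument stashed in reg17 ($s1); a forked user task
 * lands in ret_from_fork and returns to user space through its pt_regs,
 * where regs[2] ($v0) == 0 makes the syscall return zero in the child.
 */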
/* Fill in the fpu structure for a core dump. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
{
	memcpy(r, &current->thread.fpu, sizeof(current->thread.fpu));

	return 1;
}

void elf_dump_regs(elf_greg_t *gp, struct pt_regs *regs)
{
	int i;

	for (i = 0; i < EF_R0; i++)
		gp[i] = 0;
	gp[EF_R0] = 0;
	for (i = 1; i <= 31; i++)
		gp[EF_R0 + i] = regs->regs[i];
	gp[EF_R26] = 0;
	gp[EF_R27] = 0;
	gp[EF_LO] = regs->lo;
	gp[EF_HI] = regs->hi;
	gp[EF_CP0_EPC] = regs->cp0_epc;
	gp[EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	gp[EF_CP0_STATUS] = regs->cp0_status;
	gp[EF_CP0_CAUSE] = regs->cp0_cause;
#ifdef EF_UNUSED0
	gp[EF_UNUSED0] = 0;
#endif
}

int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	elf_dump_regs(*regs, task_pt_regs(tsk));
	return 1;
}

int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr)
{
	memcpy(fpr, &t->thread.fpu, sizeof(t->thread.fpu));

	return 1;
}

/*
 * Frame layout information about a kernel function, used by the stack
 * unwinder below.
 */
struct mips_frame_info {
	void		*func;
	unsigned long	func_size;
	int		frame_size;	/* size of the stack frame */
	int		pc_offset;	/* offset of the saved $ra in the frame */
};

static inline int is_ra_save_ins(union mips_instruction *ip)
{
	/* sw / sd $ra, offset($sp) */
	return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
		ip->i_format.rs == 29 &&
		ip->i_format.rt == 31;
}

static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
{
	if (ip->j_format.opcode == jal_op)
		return 1;
	if (ip->r_format.opcode != spec_op)
		return 0;
	return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
}

static inline int is_sp_move_ins(union mips_instruction *ip)
{
	/* addiu/daddiu sp,sp,-imm */
	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
		return 0;
	if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
		return 1;
	return 0;
}

static int get_frame_info(struct mips_frame_info *info)
{
	union mips_instruction *ip = info->func;
	unsigned max_insns = info->func_size / sizeof(union mips_instruction);
	unsigned i;

	info->pc_offset = -1;
	info->frame_size = 0;

	if (!ip)
		goto err;

	if (max_insns == 0)
		max_insns = 128U;	/* unknown function size */
	max_insns = min(128U, max_insns);

	for (i = 0; i < max_insns; i++, ip++) {

		if (is_jal_jalr_jr_ins(ip))
			break;
		if (!info->frame_size) {
			if (is_sp_move_ins(ip))
				info->frame_size = - ip->i_format.simmediate;
			continue;
		}
		if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
			info->pc_offset =
				ip->i_format.simmediate / sizeof(long);
			break;
		}
	}
	if (info->frame_size && info->pc_offset >= 0) /* nested */
		return 0;
	if (info->pc_offset < 0) /* leaf */
		return 1;
	/* prologue seems bogus... */
err:
	return -1;
}
static struct mips_frame_info schedule_mfi __read_mostly;

static int __init frame_info_init(void)
{
	unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long ofs;

	kallsyms_lookup_size_offset((unsigned long)schedule, &size, &ofs);
#endif
	schedule_mfi.func = schedule;
	schedule_mfi.func_size = size;

	get_frame_info(&schedule_mfi);

	/*
	 * Without schedule() frame info, the results given by
	 * thread_saved_pc() and get_wchan() are not reliable.
	 */
	if (schedule_mfi.pc_offset < 0)
		printk("Can't analyze schedule() prologue at %p\n", schedule);

	return 0;
}

arch_initcall(frame_info_init);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	/* Newborn processes are a special case */
	if (t->reg31 == (unsigned long) ret_from_fork)
		return t->reg31;
	if (schedule_mfi.pc_offset < 0)
		return 0;
	return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}


#ifdef CONFIG_KALLSYMS
/* generic stack unwinding function */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
					      unsigned long *sp,
					      unsigned long pc,
					      unsigned long *ra)
{
	struct mips_frame_info info;
	unsigned long size, ofs;
	int leaf;
	extern void ret_from_irq(void);
	extern void ret_from_exception(void);

	if (!stack_page)
		return 0;

	/*
	 * If we reached the bottom of interrupt context,
	 * return saved pc in pt_regs.
	 */
	if (pc == (unsigned long)ret_from_irq ||
	    pc == (unsigned long)ret_from_exception) {
		struct pt_regs *regs;
		if (*sp >= stack_page &&
		    *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
			regs = (struct pt_regs *)*sp;
			pc = regs->cp0_epc;
			if (__kernel_text_address(pc)) {
				*sp = regs->regs[29];
				*ra = regs->regs[31];
				return pc;
			}
		}
		return 0;
	}
	if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
		return 0;
	/*
	 * Return ra if an exception occurred at the first instruction
	 */
	if (unlikely(ofs == 0)) {
		pc = *ra;
		*ra = 0;
		return pc;
	}

	info.func = (void *)(pc - ofs);
	info.func_size = ofs;	/* analyze from start to ofs */
	leaf = get_frame_info(&info);
	if (leaf < 0)
		return 0;

	if (*sp < stack_page ||
	    *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
		return 0;

	if (leaf)
		/*
		 * In some extreme cases get_frame_info() can wrongly
		 * consider a nested function to be a leaf one.  In that
		 * case, avoid returning the same value over and over.
		 */
		pc = pc != *ra ? *ra : 0;
	else
		pc = ((unsigned long *)(*sp))[info.pc_offset];

	*sp += info.frame_size;
	*ra = 0;
	return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);

/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
			   unsigned long pc, unsigned long *ra)
{
	unsigned long stack_page = (unsigned long)task_stack_page(task);
	return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif

/*
 * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long sp;
	unsigned long ra = 0;
#endif

	if (!task || task == current || task->state == TASK_RUNNING)
		goto out;
	if (!task_stack_page(task))
		goto out;

	pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
	sp = task->thread.reg29 + schedule_mfi.frame_size;

	while (in_sched_functions(pc))
		pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
	return pc;
}
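/*
 * Typical use of the unwinder, a sketch modeled on show_backtrace();
 * the starting pc/sp/ra values are this example's assumption:
 *
 *	unsigned long sp = regs->regs[29], ra = regs->regs[31];
 *	unsigned long pc = regs->cp0_epc;
 *
 *	do {
 *		print_ip_sym(pc);
 *		pc = unwind_stack(task, &sp, pc, &ra);
 *	} while (pc);
 */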
/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for 32-bit ABIs and on a 16-byte boundary for 64-bit ABIs.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;

	return sp & ALMASK;
}
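/*
 * Worked example (hypothetical numbers, assuming 4 KiB pages):
 * get_random_int() & ~PAGE_MASK keeps only the low 12 bits, i.e. an
 * offset of 0..4095 bytes.  Subtracting it from sp randomizes the
 * initial user stack within one page, and masking with ALMASK restores
 * the ABI alignment noted in the comment above.
 */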