/*
 * linux/arch/arm/kernel/process.c
 *
 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
 * Original Copyright (C) 1995 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <stdarg.h>

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/cpuidle.h>
#include <linux/leds.h>

#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/processor.h>
#include <asm/thread_notify.h>
#include <asm/stacktrace.h>
#include <asm/mach/time.h>

#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

static const char *processor_modes[] = {
  "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
  "UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
  "USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "UK6_32" , "ABT_32" ,
  "UK8_32" , "UK9_32" , "UK10_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32"
};

static const char *isa_modes[] = {
  "ARM" , "Thumb" , "Jazelle", "ThumbEE"
};

extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
typedef void (*phys_reset_t)(unsigned long);

/*
 * A temporary stack to use for CPU reset. This is static so that we
 * don't clobber it with the identity mapping. When running with this
 * stack, any references to the current task *will not work* so you
 * should really do as little as possible before jumping to your reset
 * code.
 */
static u64 soft_restart_stack[16];

static void __soft_restart(void *addr)
{
	phys_reset_t phys_reset;

	/* Take out a flat memory mapping. */
	setup_mm_for_reboot();

	/* Clean and invalidate caches */
	flush_cache_all();

	/* Turn off caching */
	cpu_proc_fin();

	/* Push out any further dirty data, and ensure cache is empty */
	flush_cache_all();

	/* Switch to the identity mapping. */
	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
	phys_reset((unsigned long)addr);

	/* Should never get here. */
	BUG();
}

void soft_restart(unsigned long addr)
{
	u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);

	/* Disable interrupts first */
	local_irq_disable();
	local_fiq_disable();

	/* Disable the L2 if we're the last man standing. */
	if (num_online_cpus() == 1)
		outer_disable();

	/* Change to the new stack and continue with the reset. */
	call_with_stack(__soft_restart, (void *)addr, (void *)stack);

	/* Should never get here. */
	BUG();
}

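/*
 * Default (no-op) restart handler.  Platforms are expected to supply their
 * own implementation, typically wired up through the machine descriptor's
 * .restart hook (see the machine_restart() comment below).  A minimal
 * sketch of such a handler, using purely hypothetical names, might be:
 *
 *	static void my_board_restart(char mode, const char *cmd)
 *	{
 *		writel(1, my_board_reset_reg);
 *	}
 */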
static void null_restart(char mode, const char *cmd)
{
}

/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

void (*arm_pm_restart)(char str, const char *cmd) = null_restart;
EXPORT_SYMBOL_GPL(arm_pm_restart);

/*
 * This is our default idle handler.
 */

void (*arm_pm_idle)(void);

static void default_idle(void)
{
	if (arm_pm_idle)
		arm_pm_idle();
	else
		cpu_do_idle();
	local_irq_enable();
}

void arch_cpu_idle_prepare(void)
{
	local_fiq_enable();
}

void arch_cpu_idle_enter(void)
{
	ledtrig_cpu(CPU_LED_IDLE_START);
#ifdef CONFIG_PL310_ERRATA_769419
	wmb();
#endif
}

void arch_cpu_idle_exit(void)
{
	ledtrig_cpu(CPU_LED_IDLE_END);
}

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	cpu_die();
}
#endif

/*
 * Called from the core idle loop.
 */
void arch_cpu_idle(void)
{
	if (cpuidle_idle_call())
		default_idle();
}

static char reboot_mode = 'h';

int __init reboot_setup(char *str)
{
	reboot_mode = str[0];
	return 1;
}

__setup("reboot=", reboot_setup);

/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
 * functionality embodied in disable_nonboot_cpus() achieves this.
 */
void machine_shutdown(void)
{
	disable_nonboot_cpus();
}

/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
	smp_send_stop();

	local_irq_disable();
	while (1);
}

/*
 * Power-off simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this. When the system power is turned off, it will take all CPUs
 * with it.
 */
void machine_power_off(void)
{
	smp_send_stop();

	if (pm_power_off)
		pm_power_off();
}

/*
 * Restart requires that the secondary CPUs stop performing any activity
 * while the primary CPU resets the system. Systems with a single CPU can
 * use soft_restart() as their machine descriptor's .restart hook, since that
 * will cause the only available CPU to reset. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 */
void machine_restart(char *cmd)
{
	smp_send_stop();

	arm_pm_restart(reboot_mode, cmd);

	/* Give a grace period for failure to restart of 1s */
	mdelay(1000);

	/* Whoops - the platform was unable to reboot. Tell the user! */
	printk("Reboot failed -- System halted\n");
	local_irq_disable();
	while (1);
}

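/*
 * Dump the register state: PC/LR symbols, the general purpose registers,
 * the decoded CPSR flags, processor mode and ISA state, and (when CP15 is
 * available) the control register, translation table base and DAC.
 */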
void __show_regs(struct pt_regs *regs)
{
	unsigned long flags;
	char buf[64];

	show_regs_print_info(KERN_DEFAULT);

	print_symbol("PC is at %s\n", instruction_pointer(regs));
	print_symbol("LR is at %s\n", regs->ARM_lr);
	printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
	       "sp : %08lx ip : %08lx fp : %08lx\n",
		regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
		regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
	printk("r10: %08lx r9 : %08lx r8 : %08lx\n",
		regs->ARM_r10, regs->ARM_r9,
		regs->ARM_r8);
	printk("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n",
		regs->ARM_r7, regs->ARM_r6,
		regs->ARM_r5, regs->ARM_r4);
	printk("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n",
		regs->ARM_r3, regs->ARM_r2,
		regs->ARM_r1, regs->ARM_r0);

	flags = regs->ARM_cpsr;
	buf[0] = flags & PSR_N_BIT ? 'N' : 'n';
	buf[1] = flags & PSR_Z_BIT ? 'Z' : 'z';
	buf[2] = flags & PSR_C_BIT ? 'C' : 'c';
	buf[3] = flags & PSR_V_BIT ? 'V' : 'v';
	buf[4] = '\0';

	printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n",
		buf, interrupts_enabled(regs) ? "n" : "ff",
		fast_interrupts_enabled(regs) ? "n" : "ff",
		processor_modes[processor_mode(regs)],
		isa_modes[isa_mode(regs)],
		get_fs() == get_ds() ? "kernel" : "user");
#ifdef CONFIG_CPU_CP15
	{
		unsigned int ctrl;

		buf[0] = '\0';
#ifdef CONFIG_CPU_CP15_MMU
		{
			unsigned int transbase, dac;
			asm("mrc p15, 0, %0, c2, c0\n\t"
			    "mrc p15, 0, %1, c3, c0\n"
				: "=r" (transbase), "=r" (dac));
			snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x",
				transbase, dac);
		}
#endif
		asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl));

		printk("Control: %08x%s\n", ctrl, buf);
	}
#endif
}

void show_regs(struct pt_regs *regs)
{
	printk("\n");
	__show_regs(regs);
	dump_stack();
}

ATOMIC_NOTIFIER_HEAD(thread_notify_head);

EXPORT_SYMBOL_GPL(thread_notify_head);

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	thread_notify(THREAD_NOTIFY_EXIT, current_thread_info());
}

void flush_thread(void)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);

	memset(thread->used_cp, 0, sizeof(thread->used_cp));
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
	memset(&thread->fpstate, 0, sizeof(union fp_state));

	thread_notify(THREAD_NOTIFY_FLUSH, thread);
}

void release_thread(struct task_struct *dead_task)
{
}

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

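/*
 * Set up the saved CPU context of a new task so that it first resumes in
 * ret_from_fork.  For user threads the parent's pt_regs are copied, r0 is
 * cleared (the child's return value) and the stack pointer is updated; for
 * kernel threads the pt_regs are zeroed and stk_sz/stack_start are stashed
 * in r4/r5 of the saved context for ret_from_fork to pick up.
 */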
int
copy_thread(unsigned long clone_flags, unsigned long stack_start,
	    unsigned long stk_sz, struct task_struct *p)
{
	struct thread_info *thread = task_thread_info(p);
	struct pt_regs *childregs = task_pt_regs(p);

	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));

	if (likely(!(p->flags & PF_KTHREAD))) {
		*childregs = *current_pt_regs();
		childregs->ARM_r0 = 0;
		if (stack_start)
			childregs->ARM_sp = stack_start;
	} else {
		memset(childregs, 0, sizeof(struct pt_regs));
		thread->cpu_context.r4 = stk_sz;
		thread->cpu_context.r5 = stack_start;
		childregs->ARM_cpsr = SVC_MODE;
	}
	thread->cpu_context.pc = (unsigned long)ret_from_fork;
	thread->cpu_context.sp = (unsigned long)childregs;

	clear_ptrace_hw_breakpoint(p);

	if (clone_flags & CLONE_SETTLS)
		thread->tp_value = childregs->ARM_r3;

	thread_notify(THREAD_NOTIFY_COPY, thread);

	return 0;
}

/*
 * Fill in the task's elfregs structure for a core dump.
 */
int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs)
{
	elf_core_copy_regs(elfregs, task_pt_regs(t));
	return 1;
}

/*
 * fill in the fpe structure for a core dump...
 */
int dump_fpu(struct pt_regs *regs, struct user_fp *fp)
{
	struct thread_info *thread = current_thread_info();
	int used_math = thread->used_cp[1] | thread->used_cp[2];

	if (used_math)
		memcpy(fp, &thread->fpstate.soft, sizeof(*fp));

	return used_math != 0;
}
EXPORT_SYMBOL(dump_fpu);

unsigned long get_wchan(struct task_struct *p)
{
	struct stackframe frame;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	frame.fp = thread_saved_fp(p);
	frame.sp = thread_saved_sp(p);
	frame.lr = 0;			/* recovered from the stack */
	frame.pc = thread_saved_pc(p);
	do {
		int ret = unwind_frame(&frame);
		if (ret < 0)
			return 0;
		if (!in_sched_functions(frame.pc))
			return frame.pc;
	} while (count++ < 16);
	return 0;
}

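/*
 * Randomise the heap start: place the brk somewhere within 32MB
 * (0x02000000) above its current value, falling back to the
 * unrandomised address if randomize_range() fails.
 */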
440 */ 441 static struct vm_area_struct gate_vma = { 442 .vm_start = 0xffff0000, 443 .vm_end = 0xffff0000 + PAGE_SIZE, 444 .vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC, 445 }; 446 447 static int __init gate_vma_init(void) 448 { 449 gate_vma.vm_page_prot = PAGE_READONLY_EXEC; 450 return 0; 451 } 452 arch_initcall(gate_vma_init); 453 454 struct vm_area_struct *get_gate_vma(struct mm_struct *mm) 455 { 456 return &gate_vma; 457 } 458 459 int in_gate_area(struct mm_struct *mm, unsigned long addr) 460 { 461 return (addr >= gate_vma.vm_start) && (addr < gate_vma.vm_end); 462 } 463 464 int in_gate_area_no_mm(unsigned long addr) 465 { 466 return in_gate_area(NULL, addr); 467 } 468 469 const char *arch_vma_name(struct vm_area_struct *vma) 470 { 471 return (vma == &gate_vma) ? "[vectors]" : NULL; 472 } 473 #endif 474