/*
 * Based on arch/arm/kernel/process.c
 *
 * Original Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <stdarg.h>

#include <linux/compat.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/personality.h>
#include <linux/notifier.h>

#include <asm/compat.h>
#include <asm/cacheflush.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/stacktrace.h>

#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

void soft_restart(unsigned long addr)
{
        setup_mm_for_reboot();
        cpu_soft_restart(virt_to_phys(cpu_reset), addr);
        /* Should never get here */
        BUG();
}

/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
EXPORT_SYMBOL_GPL(arm_pm_restart);

/*
 * This is our default idle handler.
 */
void arch_cpu_idle(void)
{
        /*
         * This should do all the clock switching and wait for interrupt
         * tricks.
         */
        cpu_do_idle();
        local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
        cpu_die();
}
#endif

/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
 * functionality embodied in disable_nonboot_cpus() achieves this.
 */
void machine_shutdown(void)
{
        disable_nonboot_cpus();
}
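
/*
 * Illustrative sketch, not part of this file: pm_power_off and
 * arm_pm_restart above stay NULL until platform code installs handlers.
 * The names my_soc_power_off()/my_soc_pm_init() below are hypothetical;
 * the actual firmware or PMIC call is platform specific.
 */
#if 0
static void my_soc_power_off(void)
{
        /* e.g. ask the PMIC or secure firmware to cut power */
}

static int __init my_soc_pm_init(void)
{
        pm_power_off = my_soc_power_off;
        return 0;
}
arch_initcall(my_soc_pm_init);
#endif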

/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
        local_irq_disable();
        smp_send_stop();
        while (1);
}

/*
 * Power-off simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this. When the system power is turned off, it will take all CPUs
 * with it.
 */
void machine_power_off(void)
{
        local_irq_disable();
        smp_send_stop();
        if (pm_power_off)
                pm_power_off();
}

/*
 * Restart requires that the secondary CPUs stop performing any activity
 * while the primary CPU resets the system. Systems with a single CPU can
 * use soft_restart() as their machine descriptor's .restart hook, since that
 * will cause the only available CPU to reset. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 */
void machine_restart(char *cmd)
{
        /* Disable interrupts first */
        local_irq_disable();
        smp_send_stop();

        /* Now call the architecture specific reboot code. */
        if (arm_pm_restart)
                arm_pm_restart(reboot_mode, cmd);

        /*
         * Whoops - the architecture was unable to reboot.
         */
        printk("Reboot failed -- System halted\n");
        while (1);
}

void __show_regs(struct pt_regs *regs)
{
        int i, top_reg;
        u64 lr, sp;

        if (compat_user_mode(regs)) {
                lr = regs->compat_lr;
                sp = regs->compat_sp;
                top_reg = 12;
        } else {
                lr = regs->regs[30];
                sp = regs->sp;
                top_reg = 29;
        }

        show_regs_print_info(KERN_DEFAULT);
        print_symbol("PC is at %s\n", instruction_pointer(regs));
        print_symbol("LR is at %s\n", lr);
        printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
               regs->pc, lr, regs->pstate);
        printk("sp : %016llx\n", sp);
        for (i = top_reg; i >= 0; i--) {
                printk("x%-2d: %016llx ", i, regs->regs[i]);
                if (i % 2 == 0)
                        printk("\n");
        }
        printk("\n");
}

void show_regs(struct pt_regs *regs)
{
        printk("\n");
        __show_regs(regs);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
}
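
/*
 * A minimal sketch, not built here, of how EL0 code sees the TLS
 * registers managed below: TPIDR_EL0 is readable and writable from
 * userspace, while TPIDRRO_EL0 is read-only at EL0 and carries the TLS
 * pointer for compat (AArch32) tasks.
 */
#if 0
static inline unsigned long read_tls_pointer(void)
{
        unsigned long tp;

        asm("mrs %0, tpidr_el0" : "=r" (tp));
        return tp;
}
#endif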

static void tls_thread_flush(void)
{
        asm ("msr tpidr_el0, xzr");

        if (is_compat_task()) {
                current->thread.tp_value = 0;

                /*
                 * We need to ensure ordering between the shadow state and the
                 * hardware state, so that we don't corrupt the hardware state
                 * with a stale shadow state during context switch.
                 */
                barrier();
                asm ("msr tpidrro_el0, xzr");
        }
}

void flush_thread(void)
{
        fpsimd_flush_thread();
        tls_thread_flush();
        flush_ptrace_hw_breakpoint(current);
}

void release_thread(struct task_struct *dead_task)
{
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        fpsimd_preserve_current_state();
        *dst = *src;
        return 0;
}

asmlinkage void ret_from_fork(void) asm("ret_from_fork");

int copy_thread(unsigned long clone_flags, unsigned long stack_start,
                unsigned long stk_sz, struct task_struct *p)
{
        struct pt_regs *childregs = task_pt_regs(p);
        unsigned long tls = p->thread.tp_value;

        memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

        if (likely(!(p->flags & PF_KTHREAD))) {
                *childregs = *current_pt_regs();
                childregs->regs[0] = 0;
                if (is_compat_thread(task_thread_info(p))) {
                        if (stack_start)
                                childregs->compat_sp = stack_start;
                } else {
                        /*
                         * Read the current TLS pointer from tpidr_el0 as it
                         * may be out-of-sync with the saved value.
                         */
                        asm("mrs %0, tpidr_el0" : "=r" (tls));
                        if (stack_start) {
                                /* 16-byte aligned stack mandatory on AArch64 */
                                if (stack_start & 15)
                                        return -EINVAL;
                                childregs->sp = stack_start;
                        }
                }
                /*
                 * If a TLS pointer was passed to clone (4th argument), use it
                 * for the new thread.
                 */
                if (clone_flags & CLONE_SETTLS)
                        tls = childregs->regs[3];
        } else {
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->pstate = PSR_MODE_EL1h;
                p->thread.cpu_context.x19 = stack_start;
                p->thread.cpu_context.x20 = stk_sz;
        }
        p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
        p->thread.cpu_context.sp = (unsigned long)childregs;
        p->thread.tp_value = tls;

        ptrace_hw_copy_thread(p);

        return 0;
}
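
/*
 * Hedged usage sketch, not part of this file: as the comment in
 * copy_thread() notes, the raw clone syscall takes the TLS value as its
 * 4th argument, so it arrives in x3 and is read back above as
 * childregs->regs[3]. All variable names below are illustrative only.
 */
#if 0
        long child = syscall(__NR_clone,
                             CLONE_VM | CLONE_SETTLS | SIGCHLD,
                             child_stack,       /* must be 16-byte aligned */
                             &parent_tid,
                             tls_block,         /* lands in x3 */
                             &child_tid);
#endif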

static void tls_thread_switch(struct task_struct *next)
{
        unsigned long tpidr, tpidrro;

        if (!is_compat_task()) {
                asm("mrs %0, tpidr_el0" : "=r" (tpidr));
                current->thread.tp_value = tpidr;
        }

        if (is_compat_thread(task_thread_info(next))) {
                tpidr = 0;
                tpidrro = next->thread.tp_value;
        } else {
                tpidr = next->thread.tp_value;
                tpidrro = 0;
        }

        asm(
        "       msr     tpidr_el0, %0\n"
        "       msr     tpidrro_el0, %1"
        : : "r" (tpidr), "r" (tpidrro));
}

/*
 * Thread switching.
 */
struct task_struct *__switch_to(struct task_struct *prev,
                                struct task_struct *next)
{
        struct task_struct *last;

        fpsimd_thread_switch(next);
        tls_thread_switch(next);
        hw_breakpoint_thread_switch(next);
        contextidr_thread_switch(next);

        /*
         * Complete any pending TLB or cache maintenance on this CPU in case
         * the thread migrates to a different CPU.
         */
        dsb(ish);

        /* the actual thread switch */
        last = cpu_switch_to(prev, next);

        return last;
}

unsigned long get_wchan(struct task_struct *p)
{
        struct stackframe frame;
        unsigned long stack_page;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        frame.fp = thread_saved_fp(p);
        frame.sp = thread_saved_sp(p);
        frame.pc = thread_saved_pc(p);
        stack_page = (unsigned long)task_stack_page(p);
        do {
                if (frame.sp < stack_page ||
                    frame.sp >= stack_page + THREAD_SIZE ||
                    unwind_frame(&frame))
                        return 0;
                if (!in_sched_functions(frame.pc))
                        return frame.pc;
        } while (count++ < 16);
        return 0;
}

unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() & ~PAGE_MASK;
        return sp & ~0xf;
}

static unsigned long randomize_base(unsigned long base)
{
        unsigned long range_end = base + (STACK_RND_MASK << PAGE_SHIFT) + 1;

        return randomize_range(base, range_end, 0) ? : base;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        return randomize_base(mm->brk);
}

unsigned long randomize_et_dyn(unsigned long base)
{
        return randomize_base(base);
}
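
/*
 * Illustrative expansion, not built here: "x ? : y" is the GCC elvis
 * extension, so randomize_base() keeps the original base whenever
 * randomize_range() fails and returns 0. A longhand equivalent (the
 * function name below is hypothetical):
 */
#if 0
static unsigned long randomize_base_longhand(unsigned long base)
{
        unsigned long range_end = base + (STACK_RND_MASK << PAGE_SHIFT) + 1;
        unsigned long addr = randomize_range(base, range_end, 0);

        return addr ? addr : base;
}
#endif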