// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *    PARISC Architecture-dependent parts of process handling
 *    based on the work for i386
 *
 *    Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
 *    Copyright (C) 2000 Martin K Petersen <mkp at mkp.net>
 *    Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
 *    Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org>
 *    Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
 *    Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org>
 *    Copyright (C) 2000 David Kennedy <dkennedy with linuxcare.com>
 *    Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
 *    Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
 *    Copyright (C) 2001 Alan Modra <amodra at parisc-linux.org>
 *    Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
 *    Copyright (C) 2001-2014 Helge Deller <deller@gmx.de>
 *    Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
 */

#include <stdarg.h>

#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/nmi.h>

#include <asm/io.h>
#include <asm/asm-offsets.h>
#include <asm/assembly.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/sections.h>

#define COMMAND_GLOBAL	F_EXTEND(0xfffe0030)
#define CMD_RESET	5	/* reset any module */

/*
** The Wright Brothers and Gecko systems have a H/W problem
** (Lasi...'nuf said) which may cause a broadcast reset to lock up
** the system. An HVERSION-dependent PDC call was developed
** to perform a "safe", platform-specific broadcast reset instead
** of kludging up all the code.
**
** Older machines which do not implement PDC_BROADCAST_RESET will
** return (with an error) and the regular broadcast reset can be
** issued. Obviously, if the PDC does implement PDC_BROADCAST_RESET,
** the PDC call will not return (the system will be reset).
*/
void machine_restart(char *cmd)
{
#ifdef FASTBOOT_SELFTEST_SUPPORT
	/*
	** If the user has modified the Firmware Selftest Bitmap,
	** run the tests specified in the bitmap after the
	** system is rebooted w/PDC_DO_RESET.
	**
	** ftc_bitmap = 0x1AUL "Skip destructive memory tests"
	**
	** Using "directed resets" at each processor with the MEM_TOC
	** vector cleared will also avoid running destructive
	** memory self tests. (Not implemented yet)
	*/
	if (ftc_bitmap) {
		pdc_do_firm_test_reset(ftc_bitmap);
	}
#endif
	/* set up a new LED state on systems shipped with a LED State panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);

	/* "Normal" system reset */
	pdc_do_reset();

	/* Nope...box should reset with just CMD_RESET now */
	gsc_writel(CMD_RESET, COMMAND_GLOBAL);

	/* Wait for RESET to lay us to rest. */
	while (1) ;

}

void (*chassis_power_off)(void);

/*
 * This routine is called from sys_reboot to actually turn off the
 * machine.
 */
void machine_power_off(void)
{
	/* If there is a registered power off handler, call it. */
	if (chassis_power_off)
		chassis_power_off();

	/* Put the soft power button back under hardware control.
	 * If the user had already pressed the power button, the
	 * following call will immediately power off. */
	pdc_soft_power_button(0);

	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);

	/* ipmi_poweroff may have been installed. */
	if (pm_power_off)
		pm_power_off();

	/* It seems we have no way to power the system off via
	 * software. The user has to press the button himself. */

	printk(KERN_EMERG "System shut down completed.\n"
		"Please power this system off now.");

	/* prevent soft lockup/stalled CPU messages for the endless loop. */
	rcu_sysrq_start();
	lockup_detector_soft_poweroff();
	for (;;);
}

void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

/* There is no separate halt state; treat halt as a power off. */
void machine_halt(void)
{
	machine_power_off();
}

void flush_thread(void)
{
	/* Only needs to handle fpu stuff or perf monitors.
	** REVISIT: several arches implement a "lazy fpu state".
	*/
}

/* Nothing architecture-specific to free when a task exits. */
void release_thread(struct task_struct *dead_task)
{
}

/*
 * Fill in the FPU structure for a core dump.
 */

int dump_fpu (struct pt_regs * regs, elf_fpregset_t *r)
{
	if (regs == NULL)
		return 0;

	memcpy(r, regs->fr, sizeof *r);
	return 1;
}

int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
{
	memcpy(r, tsk->thread.regs.fr, sizeof(*r));
	return 1;
}

/*
 * Idle thread support
 *
 * Detect when running on QEMU with SeaBIOS PDC Firmware and let
 * QEMU idle the host too.
 */

int running_on_qemu __ro_after_init;
EXPORT_SYMBOL(running_on_qemu);

void __cpuidle arch_cpu_idle_dead(void)
{
	/* nop on real hardware, qemu will offline the CPU. */
	asm volatile("or %%r31,%%r31,%%r31\n":::);
}

void __cpuidle arch_cpu_idle(void)
{
	local_irq_enable();

	/* nop on real hardware, qemu will idle sleep. */
	asm volatile("or %%r10,%%r10,%%r10\n":::);
}

static int __init parisc_idle_init(void)
{
	/* Fall back to polling idle on real hardware. */
	if (!running_on_qemu)
		cpu_idle_poll_ctrl(1);

	return 0;
}
arch_initcall(parisc_idle_init);

/*
 * Copy architecture-specific thread state
 */
int
copy_thread(unsigned long clone_flags, unsigned long usp,
	unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
{
	struct pt_regs *cregs = &(p->thread.regs);
	void *stack = task_stack_page(p);

	/* We have to use void * instead of a function pointer, because
	 * function pointers aren't a pointer to the function on 64-bit.
	 * Make them const so the compiler knows they live in .text */
	extern void * const ret_from_kernel_thread;
	extern void * const child_return;

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(cregs, 0, sizeof(struct pt_regs));
		if (!usp) /* idle thread */
			return 0;
		/* Must exit via ret_from_kernel_thread in order
		 * to call schedule_tail()
		 */
		cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN + FRAME_SIZE;
		cregs->kpc = (unsigned long) &ret_from_kernel_thread;
		/*
		 * Copy function and argument to be called from
		 * ret_from_kernel_thread.
		 */
#ifdef CONFIG_64BIT
		cregs->gr[27] = ((unsigned long *)usp)[3];
		cregs->gr[26] = ((unsigned long *)usp)[2];
#else
		cregs->gr[26] = usp;
#endif
		cregs->gr[25] = kthread_arg;
	} else {
		/* user thread */
		/* usp must be word aligned.  This also prevents users from
		 * passing in the value 1 (which is the signal for a special
		 * return for a kernel thread) */
		if (usp) {
			usp = ALIGN(usp, 4);
			if (likely(usp))
				cregs->gr[30] = usp;
		}
		cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN + FRAME_SIZE;
		cregs->kpc = (unsigned long) &child_return;

		/* Setup thread TLS area */
		if (clone_flags & CLONE_SETTLS)
			cregs->cr27 = tls;
	}

	return 0;
}

/*
 * Return the address a blocked task is sleeping at, by unwinding its
 * kernel stack until we leave the scheduler functions.
 */
unsigned long
get_wchan(struct task_struct *p)
{
	struct unwind_frame_info info;
	unsigned long ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	/*
	 * These bracket the sleeping functions.
	 */

	unwind_frame_init_from_blocked_task(&info, p);
	do {
		if (unwind_once(&info) < 0)
			return 0;
		ip = info.ip;
		if (!in_sched_functions(ip))
			return ip;
	} while (count++ < MAX_UNWIND_ENTRIES);
	return 0;
}

#ifdef CONFIG_64BIT
/* On 64-bit, a function pointer points to an ELF function descriptor
 * (Elf64_Fdesc); return the actual entry address stored in it. */
void *dereference_function_descriptor(void *ptr)
{
	Elf64_Fdesc *desc = ptr;
	void *p;

	if (!get_kernel_nofault(p, (void *)&desc->addr))
		ptr = p;
	return ptr;
}

/* Only dereference pointers which lie inside the kernel's .opd section. */
void *dereference_kernel_function_descriptor(void *ptr)
{
	if (ptr < (void *)__start_opd ||
			ptr >= (void *)__end_opd)
		return ptr;

	return dereference_function_descriptor(ptr);
}
#endif

/* Random, page-aligned offset added to the heap start for brk randomization. */
static inline unsigned long brk_rnd(void)
{
	return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());

	/* Guard against wrap-around; never move brk backwards. */
	if (ret < mm->brk)
		return mm->brk;
	return ret;
}