// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/alpha/kernel/process.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/time.h>
#include <linux/major.h>
#include <linux/stat.h>
#include <linux/vt.h>
#include <linux/mman.h>
#include <linux/elfcore.h>
#include <linux/reboot.h>
#include <linux/tty.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

#include <asm/reg.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/hwrpb.h>
#include <asm/fpu.h>

#include "proto.h"
#include "pci_impl.h"

/*
 * Power off function, if any
 */
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL(pm_power_off);

#ifdef CONFIG_ALPHA_WTINT
/*
 * Sleep the CPU.
 * EV6, LCA45 and QEMU know how to power down, skipping N timer interrupts.
 */
void arch_cpu_idle(void)
{
        wtint(0);
}

void arch_cpu_idle_dead(void)
{
        wtint(INT_MAX);
}
#endif /* ALPHA_WTINT */

struct halt_info {
        int mode;
        char *restart_cmd;
};

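/*
 * common_shutdown_1() records the requested shutdown action in the
 * per-CPU flags word of the HWRPB.  As used below, bit 0 is the
 * "bootstrap in progress" flag and bits 16..23 hold the halt-request
 * reason: 0x02 means "cold bootstrap", 0x03 "warm bootstrap", and
 * 0x04 "remain halted".
 */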

static void
common_shutdown_1(void *generic_ptr)
{
        struct halt_info *how = generic_ptr;
        struct percpu_struct *cpup;
        unsigned long *pflags, flags;
        int cpuid = smp_processor_id();

        /* No point in taking interrupts anymore. */
        local_irq_disable();

        cpup = (struct percpu_struct *)
                        ((unsigned long)hwrpb + hwrpb->processor_offset
                         + hwrpb->processor_size * cpuid);
        pflags = &cpup->flags;
        flags = *pflags;

        /* Clear reason to "default"; clear "bootstrap in progress". */
        flags &= ~0x00ff0001UL;

#ifdef CONFIG_SMP
        /* Secondaries halt here. */
        if (cpuid != boot_cpuid) {
                flags |= 0x00040000UL; /* "remain halted" */
                *pflags = flags;
                set_cpu_present(cpuid, false);
                set_cpu_possible(cpuid, false);
                halt();
        }
#endif

        if (how->mode == LINUX_REBOOT_CMD_RESTART) {
                if (!how->restart_cmd) {
                        flags |= 0x00020000UL; /* "cold bootstrap" */
                } else {
                        /* For SRM, we could probably set environment
                           variables to get this to work.  We'd have to
                           delay this until after srm_paging_stop unless
                           we ever got srm_fixup working.

                           At the moment, SRM will use the last boot device,
                           but the file and flags will be the defaults, when
                           doing a "warm" bootstrap.  */
                        flags |= 0x00030000UL; /* "warm bootstrap" */
                }
        } else {
                flags |= 0x00040000UL; /* "remain halted" */
        }
        *pflags = flags;

#ifdef CONFIG_SMP
        /* Wait for the secondaries to halt. */
        set_cpu_present(boot_cpuid, false);
        set_cpu_possible(boot_cpuid, false);
        while (!cpumask_empty(cpu_present_mask))
                barrier();
#endif

        /* If booted from SRM, reset some of the original environment. */
        if (alpha_using_srm) {
#ifdef CONFIG_DUMMY_CONSOLE
                /* If we've gotten here after SysRq-b, leave interrupt
                   context before taking over the console. */
                if (in_hardirq())
                        irq_exit();
                /* This has the effect of resetting the VGA video origin.  */
                console_lock();
                do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES-1, 1);
                console_unlock();
#endif
                pci_restore_srm_config();
                set_hae(srm_hae);
        }

        if (alpha_mv.kill_arch)
                alpha_mv.kill_arch(how->mode);

        if (! alpha_using_srm && how->mode != LINUX_REBOOT_CMD_RESTART) {
                /* Unfortunately, since MILO doesn't currently understand
                   the hwrpb bits above, we can't reliably halt the
                   processor and keep it halted.  So just loop.  */
                return;
        }

        if (alpha_using_srm)
                srm_paging_stop();

        halt();
}

static void
common_shutdown(int mode, char *restart_cmd)
{
        struct halt_info args;
        args.mode = mode;
        args.restart_cmd = restart_cmd;
        on_each_cpu(common_shutdown_1, &args, 0);
}

void
machine_restart(char *restart_cmd)
{
        common_shutdown(LINUX_REBOOT_CMD_RESTART, restart_cmd);
}


void
machine_halt(void)
{
        common_shutdown(LINUX_REBOOT_CMD_HALT, NULL);
}


void
machine_power_off(void)
{
        common_shutdown(LINUX_REBOOT_CMD_POWER_OFF, NULL);
}


/* Used by sysrq-p, among others.  I don't believe r9-r15 are ever
   saved in the context in which it's used.  */

void
show_regs(struct pt_regs *regs)
{
        show_regs_print_info(KERN_DEFAULT);
        dik_show_regs(regs, NULL);
}

/*
 * Re-start a thread when doing execve()
 */
void
start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
        regs->pc = pc;
        regs->ps = 8;
        wrusp(sp);
}
EXPORT_SYMBOL(start_thread);

void
flush_thread(void)
{
        /* Arrange for each exec'ed process to start off with a clean slate
           with respect to the FPU.  That is, with all exceptions disabled.  */
        current_thread_info()->ieee_state = 0;
        wrfpcr(FPCR_DYN_NORMAL | ieee_swcr_to_fpcr(0));

        /* Clean slate for TLS.  */
        current_thread_info()->pcb.unique = 0;
}

/*
 * Copy architecture-specific thread state
 */
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
        unsigned long clone_flags = args->flags;
        unsigned long usp = args->stack;
        unsigned long tls = args->tls;
        extern void ret_from_fork(void);
        extern void ret_from_kernel_thread(void);

        struct thread_info *childti = task_thread_info(p);
        struct pt_regs *childregs = task_pt_regs(p);
        struct pt_regs *regs = current_pt_regs();
        struct switch_stack *childstack, *stack;

        childstack = ((struct switch_stack *) childregs) - 1;
        childti->pcb.ksp = (unsigned long) childstack;
        childti->pcb.flags = 1; /* set FEN, clear everything else */
        childti->status |= TS_SAVED_FP | TS_RESTORE_FP;

        if (unlikely(args->fn)) {
                /* kernel thread */
                memset(childstack, 0,
                        sizeof(struct switch_stack) + sizeof(struct pt_regs));
                childstack->r26 = (unsigned long) ret_from_kernel_thread;
                childstack->r9 = (unsigned long) args->fn;
                childstack->r10 = (unsigned long) args->fn_arg;
                childregs->hae = alpha_mv.hae_cache;
                memset(childti->fp, '\0', sizeof(childti->fp));
                childti->pcb.usp = 0;
                return 0;
        }
        /* Note: if CLONE_SETTLS is not set, then we must inherit the
           value from the parent, which will have been set by the block
           copy in dup_task_struct.  This is non-intuitive, but is
           required for proper operation in the case of a threaded
           application calling fork.  */
        if (clone_flags & CLONE_SETTLS)
                childti->pcb.unique = tls;
        else
                regs->r20 = 0;  /* OSF/1 has some strange fork() semantics.  */
        childti->pcb.usp = usp ?: rdusp();
        *childregs = *regs;
        childregs->r0 = 0;
        childregs->r19 = 0;
        childregs->r20 = 1;     /* OSF/1 has some strange fork() semantics.  */
        stack = ((struct switch_stack *) regs) - 1;
        *childstack = *stack;
        childstack->r26 = (unsigned long) ret_from_fork;
        return 0;
}

/*
 * Fill in the user structure for an ELF core dump.
 */
void
dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti)
{
        /* switch stack follows right below pt_regs: */
        struct switch_stack * sw = ((struct switch_stack *) pt) - 1;

        dest[ 0] = pt->r0;
        dest[ 1] = pt->r1;
        dest[ 2] = pt->r2;
        dest[ 3] = pt->r3;
        dest[ 4] = pt->r4;
        dest[ 5] = pt->r5;
        dest[ 6] = pt->r6;
        dest[ 7] = pt->r7;
        dest[ 8] = pt->r8;
        dest[ 9] = sw->r9;
        dest[10] = sw->r10;
        dest[11] = sw->r11;
        dest[12] = sw->r12;
        dest[13] = sw->r13;
        dest[14] = sw->r14;
        dest[15] = sw->r15;
        dest[16] = pt->r16;
        dest[17] = pt->r17;
        dest[18] = pt->r18;
        dest[19] = pt->r19;
        dest[20] = pt->r20;
        dest[21] = pt->r21;
        dest[22] = pt->r22;
        dest[23] = pt->r23;
        dest[24] = pt->r24;
        dest[25] = pt->r25;
        dest[26] = pt->r26;
        dest[27] = pt->r27;
        dest[28] = pt->r28;
        dest[29] = pt->gp;
        dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp;
        dest[31] = pt->pc;

        /* Once upon a time this was the PS value.  Which is stupid
           since that is always 8 for usermode.  Usurped for the more
           useful value of the thread's UNIQUE field.  */
        dest[32] = ti->pcb.unique;
}
EXPORT_SYMBOL(dump_elf_thread);

int
dump_elf_task(elf_greg_t *dest, struct task_struct *task)
{
        dump_elf_thread(dest, task_pt_regs(task), task_thread_info(task));
        return 1;
}
EXPORT_SYMBOL(dump_elf_task);

int elf_core_copy_task_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
        memcpy(fpu, task_thread_info(t)->fp, 32 * 8);
        return 1;
}

/*
 * Return saved PC of a blocked thread.  This assumes the frame
 * pointer is the 6th saved long on the kernel stack and that the
 * saved return address is the first long in the frame.  This all
 * holds provided the thread blocked through a call to schedule() ($15
 * is the frame pointer in schedule() and $15 is saved at offset 48 by
 * entry.S:do_switch_stack).
 *
 * Under heavy swap load I've seen this lose in an ugly way.  So do
 * some extra sanity checking on the ranges we expect these pointers
 * to be in so that we can fail gracefully.  This is just for ps after
 * all.  -- r~
 */

static unsigned long
thread_saved_pc(struct task_struct *t)
{
        unsigned long base = (unsigned long)task_stack_page(t);
        unsigned long fp, sp = task_thread_info(t)->pcb.ksp;

        if (sp > base && sp+6*8 < base + 16*1024) {
                fp = ((unsigned long*)sp)[6];
                if (fp > sp && fp < base + 16*1024)
                        return *(unsigned long *)fp;
        }

        return 0;
}

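/*
 * Both thread_saved_pc() above and __get_wchan() below index the saved
 * switch_stack with [6]: as the comment above notes, $15 (the frame
 * pointer) is stored at offset 48, i.e. the seventh 8-byte slot.
 */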

unsigned long
__get_wchan(struct task_struct *p)
{
        unsigned long schedule_frame;
        unsigned long pc;

        /*
         * This one depends on the frame size of schedule().  Do a
         * "disass schedule" in gdb to find the frame size.  Also, the
         * code assumes that sleep_on() follows immediately after
         * interruptible_sleep_on() and that add_timer() follows
         * immediately after interruptible_sleep().  Ugly, isn't it?
         * Maybe adding a wchan field to task_struct would be better,
         * after all...
         */

        pc = thread_saved_pc(p);
        if (in_sched_functions(pc)) {
                schedule_frame = ((unsigned long *)task_thread_info(p)->pcb.ksp)[6];
                return ((unsigned long *)schedule_frame)[12];
        }
        return pc;
}