// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/alpha/kernel/process.c
 *
 * Copyright (C) 1995 Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/time.h>
#include <linux/major.h>
#include <linux/stat.h>
#include <linux/vt.h>
#include <linux/mman.h>
#include <linux/elfcore.h>
#include <linux/reboot.h>
#include <linux/tty.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

#include <asm/reg.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/hwrpb.h>
#include <asm/fpu.h>

#include "proto.h"
#include "pci_impl.h"

/*
 * Power off function, if any
 */
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL(pm_power_off);

#ifdef CONFIG_ALPHA_WTINT
/*
 * Sleep the CPU.
 * EV6, LCA45 and QEMU know how to power down, skipping N timer interrupts.
 */
void arch_cpu_idle(void)
{
        wtint(0);
}

void arch_cpu_idle_dead(void)
{
        wtint(INT_MAX);
}
#endif /* ALPHA_WTINT */

struct halt_info {
        int mode;
        char *restart_cmd;
};

static void
common_shutdown_1(void *generic_ptr)
{
        struct halt_info *how = (struct halt_info *)generic_ptr;
        struct percpu_struct *cpup;
        unsigned long *pflags, flags;
        int cpuid = smp_processor_id();

        /* No point in taking interrupts anymore. */
        local_irq_disable();

        cpup = (struct percpu_struct *)
                ((unsigned long)hwrpb + hwrpb->processor_offset
                 + hwrpb->processor_size * cpuid);
        pflags = &cpup->flags;
        flags = *pflags;

        /* Clear reason to "default"; clear "bootstrap in progress". */
        flags &= ~0x00ff0001UL;

#ifdef CONFIG_SMP
        /* Secondaries halt here. */
        if (cpuid != boot_cpuid) {
                flags |= 0x00040000UL;  /* "remain halted" */
                *pflags = flags;
                set_cpu_present(cpuid, false);
                set_cpu_possible(cpuid, false);
                halt();
        }
#endif

        if (how->mode == LINUX_REBOOT_CMD_RESTART) {
                if (!how->restart_cmd) {
                        flags |= 0x00020000UL;  /* "cold bootstrap" */
                } else {
                        /* For SRM, we could probably set environment
                           variables to get this to work.  We'd have to
                           delay this until after srm_paging_stop unless
                           we ever got srm_fixup working.

                           At the moment, SRM will use the last boot device,
                           but the file and flags will be the defaults, when
                           doing a "warm" bootstrap.  */
                        flags |= 0x00030000UL;  /* "warm bootstrap" */
                }
        } else {
                flags |= 0x00040000UL;  /* "remain halted" */
        }
        *pflags = flags;

#ifdef CONFIG_SMP
        /* Wait for the secondaries to halt. */
        set_cpu_present(boot_cpuid, false);
        set_cpu_possible(boot_cpuid, false);
        while (!cpumask_empty(cpu_present_mask))
                barrier();
#endif

        /* If booted from SRM, reset some of the original environment. */
        if (alpha_using_srm) {
#ifdef CONFIG_DUMMY_CONSOLE
                /* If we've gotten here after SysRq-b, leave interrupt
                   context before taking over the console. */
                if (in_irq())
                        irq_exit();
                /* This has the effect of resetting the VGA video origin. */
                console_lock();
                do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES-1, 1);
                console_unlock();
#endif
                pci_restore_srm_config();
                set_hae(srm_hae);
        }

        if (alpha_mv.kill_arch)
                alpha_mv.kill_arch(how->mode);

        if (!alpha_using_srm && how->mode != LINUX_REBOOT_CMD_RESTART) {
                /* Unfortunately, since MILO doesn't currently understand
                   the hwrpb bits above, we can't reliably halt the
                   processor and keep it halted.  So just loop.  */
                return;
        }

        if (alpha_using_srm)
                srm_paging_stop();

        halt();
}
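
/*
 * For reference, a compiled-out sketch of the per-CPU HWRPB flag bits
 * manipulated in common_shutdown_1() above.  The numeric values are taken
 * straight from the code; the macro names are illustrative only (they are
 * not defined in any header), which is why the block is kept under #if 0.
 */
#if 0
#define CPU_FLAG_BIP            0x00000001UL    /* bootstrap in progress */
#define CPU_FLAG_REASON_MASK    0x00ff0000UL    /* bootstrap/halt reason field */
#define CPU_REASON_COLD_BOOT    0x00020000UL    /* "cold bootstrap" */
#define CPU_REASON_WARM_BOOT    0x00030000UL    /* "warm bootstrap" */
#define CPU_REASON_HALT         0x00040000UL    /* "remain halted" */
#endif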

static void
common_shutdown(int mode, char *restart_cmd)
{
        struct halt_info args;
        args.mode = mode;
        args.restart_cmd = restart_cmd;
        on_each_cpu(common_shutdown_1, &args, 0);
}

void
machine_restart(char *restart_cmd)
{
        common_shutdown(LINUX_REBOOT_CMD_RESTART, restart_cmd);
}


void
machine_halt(void)
{
        common_shutdown(LINUX_REBOOT_CMD_HALT, NULL);
}


void
machine_power_off(void)
{
        common_shutdown(LINUX_REBOOT_CMD_POWER_OFF, NULL);
}


/* Used by sysrq-p, among others.  I don't believe r9-r15 are ever
   saved in the context it's used.  */

void
show_regs(struct pt_regs *regs)
{
        show_regs_print_info(KERN_DEFAULT);
        dik_show_regs(regs, NULL);
}

/*
 * Re-start a thread when doing execve()
 */
void
start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
        regs->pc = pc;
        regs->ps = 8;
        wrusp(sp);
}
EXPORT_SYMBOL(start_thread);

void
flush_thread(void)
{
        /* Arrange for each exec'ed process to start off with a clean slate
           with respect to the FPU.  That is, all exceptions disabled.  */
        current_thread_info()->ieee_state = 0;
        wrfpcr(FPCR_DYN_NORMAL | ieee_swcr_to_fpcr(0));

        /* Clean slate for TLS.  */
        current_thread_info()->pcb.unique = 0;
}

/*
 * Copy architecture-specific thread state
 */
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
        unsigned long clone_flags = args->flags;
        unsigned long usp = args->stack;
        unsigned long tls = args->tls;
        extern void ret_from_fork(void);
        extern void ret_from_kernel_thread(void);

        struct thread_info *childti = task_thread_info(p);
        struct pt_regs *childregs = task_pt_regs(p);
        struct pt_regs *regs = current_pt_regs();
        struct switch_stack *childstack, *stack;

        childstack = ((struct switch_stack *) childregs) - 1;
        childti->pcb.ksp = (unsigned long) childstack;
        childti->pcb.flags = 1; /* set FEN, clear everything else */

        if (unlikely(args->fn)) {
                /* kernel thread */
                memset(childstack, 0,
                        sizeof(struct switch_stack) + sizeof(struct pt_regs));
                childstack->r26 = (unsigned long) ret_from_kernel_thread;
                childstack->r9 = (unsigned long) args->fn;
                childstack->r10 = (unsigned long) args->fn_arg;
                childregs->hae = alpha_mv.hae_cache;
                childti->pcb.usp = 0;
                return 0;
        }
        /* Note: if CLONE_SETTLS is not set, then we must inherit the
           value from the parent, which will have been set by the block
           copy in dup_task_struct.  This is non-intuitive, but is
           required for proper operation in the case of a threaded
           application calling fork.  */
        if (clone_flags & CLONE_SETTLS)
                childti->pcb.unique = tls;
        else
                regs->r20 = 0;  /* OSF/1 has some strange fork() semantics.  */
        childti->pcb.usp = usp ?: rdusp();
        *childregs = *regs;
        childregs->r0 = 0;
        childregs->r19 = 0;
        childregs->r20 = 1;     /* OSF/1 has some strange fork() semantics.  */
        stack = ((struct switch_stack *) regs) - 1;
        *childstack = *stack;
        childstack->r26 = (unsigned long) ret_from_fork;
        return 0;
}
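
/*
 * Sketch of the child's kernel stack as copy_thread() above lays it out
 * (derived from the code, not from a header):
 *
 *      +---------------------+  <- top of the child's kernel stack
 *      |   struct pt_regs    |  <- childregs == task_pt_regs(p)
 *      +---------------------+
 *      | struct switch_stack |  <- childstack == childti->pcb.ksp
 *      +---------------------+
 *      |         ...         |
 *
 * For a user fork/clone both frames are copied from the parent and r26 is
 * pointed at ret_from_fork; for a kernel thread both frames are zeroed and
 * r26, r9 and r10 carry ret_from_kernel_thread, the callback and its
 * argument.
 */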

/*
 * Fill in the user structure for an ELF core dump.
 */
void
dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti)
{
        /* switch stack follows right below pt_regs: */
        struct switch_stack *sw = ((struct switch_stack *) pt) - 1;

        dest[ 0] = pt->r0;
        dest[ 1] = pt->r1;
        dest[ 2] = pt->r2;
        dest[ 3] = pt->r3;
        dest[ 4] = pt->r4;
        dest[ 5] = pt->r5;
        dest[ 6] = pt->r6;
        dest[ 7] = pt->r7;
        dest[ 8] = pt->r8;
        dest[ 9] = sw->r9;
        dest[10] = sw->r10;
        dest[11] = sw->r11;
        dest[12] = sw->r12;
        dest[13] = sw->r13;
        dest[14] = sw->r14;
        dest[15] = sw->r15;
        dest[16] = pt->r16;
        dest[17] = pt->r17;
        dest[18] = pt->r18;
        dest[19] = pt->r19;
        dest[20] = pt->r20;
        dest[21] = pt->r21;
        dest[22] = pt->r22;
        dest[23] = pt->r23;
        dest[24] = pt->r24;
        dest[25] = pt->r25;
        dest[26] = pt->r26;
        dest[27] = pt->r27;
        dest[28] = pt->r28;
        dest[29] = pt->gp;
        dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp;
        dest[31] = pt->pc;

        /* Once upon a time this was the PS value.  Which is stupid
           since that is always 8 for usermode.  Usurped for the more
           useful value of the thread's UNIQUE field.  */
        dest[32] = ti->pcb.unique;
}
EXPORT_SYMBOL(dump_elf_thread);

int
dump_elf_task(elf_greg_t *dest, struct task_struct *task)
{
        dump_elf_thread(dest, task_pt_regs(task), task_thread_info(task));
        return 1;
}
EXPORT_SYMBOL(dump_elf_task);

int elf_core_copy_task_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
        struct switch_stack *sw = (struct switch_stack *)task_pt_regs(t) - 1;
        memcpy(fpu, sw->fp, 32 * 8);
        return 1;
}
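
/*
 * Note on the dump layout above: r9-r15 have to come from the switch
 * stack because pt_regs does not hold the callee-saved registers;
 * dest[29] is the gp, dest[30] the user stack pointer, dest[31] the pc
 * and dest[32] the PALcode "unique" (TLS) value.  The 32 * 8 byte fpreg
 * copy matches the fp[32] save area of struct switch_stack.
 */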

/*
 * Return saved PC of a blocked thread.  This assumes the frame
 * pointer is the 6th saved long on the kernel stack and that the
 * saved return address is the first long in the frame.  This all
 * holds provided the thread blocked through a call to schedule() ($15
 * is the frame pointer in schedule() and $15 is saved at offset 48 by
 * entry.S:do_switch_stack).
 *
 * Under heavy swap load I've seen this lose in an ugly way.  So do
 * some extra sanity checking on the ranges we expect these pointers
 * to be in so that we can fail gracefully.  This is just for ps after
 * all.  -- r~
 */

static unsigned long
thread_saved_pc(struct task_struct *t)
{
        unsigned long base = (unsigned long)task_stack_page(t);
        unsigned long fp, sp = task_thread_info(t)->pcb.ksp;

        if (sp > base && sp+6*8 < base + 16*1024) {
                fp = ((unsigned long *)sp)[6];
                if (fp > sp && fp < base + 16*1024)
                        return *(unsigned long *)fp;
        }

        return 0;
}

unsigned long
__get_wchan(struct task_struct *p)
{
        unsigned long schedule_frame;
        unsigned long pc;

        /*
         * This one depends on the frame size of schedule().  Do a
         * "disass schedule" in gdb to find the frame size.  Also, the
         * code assumes that sleep_on() follows immediately after
         * interruptible_sleep_on() and that add_timer() follows
         * immediately after interruptible_sleep().  Ugly, isn't it?
         * Maybe adding a wchan field to task_struct would be better,
         * after all...
         */

        pc = thread_saved_pc(p);
        if (in_sched_functions(pc)) {
                schedule_frame = ((unsigned long *)task_thread_info(p)->pcb.ksp)[6];
                return ((unsigned long *)schedule_frame)[12];
        }
        return pc;
}
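
/*
 * Index arithmetic used above, spelled out: pcb.ksp points at the
 * switch_stack saved by entry.S:do_switch_stack, whose quadword at
 * index 6 (byte offset 48) is the saved $15 frame pointer mentioned in
 * the comment before thread_saved_pc().  The [12] offset into
 * schedule()'s frame depends on that function's current frame layout
 * and has to be rechecked (e.g. "disass schedule" in gdb) whenever
 * schedule() changes.
 */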