// SPDX-License-Identifier: GPL-2.0
/*  arch/sparc64/kernel/process.c
 *
 *  Copyright (C) 1995, 1996, 2008 David S. Miller (davem@davemloft.net)
 *  Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 *  Copyright (C) 1997, 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <stdarg.h>

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/compat.h>
#include <linux/tick.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/perf_event.h>
#include <linux/elfcore.h>
#include <linux/sysrq.h>
#include <linux/nmi.h>
#include <linux/context_tracking.h>
#include <linux/signal.h>

#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/pstate.h>
#include <asm/elf.h>
#include <asm/fpumacro.h>
#include <asm/head.h>
#include <asm/cpudata.h>
#include <asm/mmu_context.h>
#include <asm/unistd.h>
#include <asm/hypervisor.h>
#include <asm/syscalls.h>
#include <asm/irq_regs.h>
#include <asm/smp.h>
#include <asm/pcr.h>

#include "kstack.h"

/* Idle loop support on sparc64. */
void arch_cpu_idle(void)
{
	if (tlb_type != hypervisor) {
		touch_nmi_watchdog();
		local_irq_enable();
	} else {
		unsigned long pstate;

		local_irq_enable();

		/* The sun4v sleeping code requires that we have PSTATE.IE cleared over
		 * the cpu sleep hypervisor call.
		 */
		__asm__ __volatile__(
			"rdpr %%pstate, %0\n\t"
			"andn %0, %1, %0\n\t"
			"wrpr %0, %%g0, %%pstate"
			: "=&r" (pstate)
			: "i" (PSTATE_IE));

		if (!need_resched() && !cpu_is_offline(smp_processor_id())) {
			sun4v_cpu_yield();
			/* If resumed by cpu_poke then we need to explicitly
			 * call scheduler_ipi().
			 */
			scheduler_poke();
		}

		/* Re-enable interrupts. */
		__asm__ __volatile__(
			"rdpr %%pstate, %0\n\t"
			"or %0, %1, %0\n\t"
			"wrpr %0, %%g0, %%pstate"
			: "=&r" (pstate)
			: "i" (PSTATE_IE));
	}
}
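/* With CPU hotplug, a CPU that has gone offline spins in the idle
 * loop until it is told to die; arch_cpu_idle_dead() is the hook
 * that finally parks it via cpu_play_dead().
 */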
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	sched_preempt_enable_no_resched();
	cpu_play_dead();
}
#endif

#ifdef CONFIG_COMPAT
static void show_regwindow32(struct pt_regs *regs)
{
	struct reg_window32 __user *rw;
	struct reg_window32 r_w;
	mm_segment_t old_fs;

	__asm__ __volatile__ ("flushw");
	rw = compat_ptr((unsigned int)regs->u_regs[14]);
	old_fs = get_fs();
	set_fs(USER_DS);
	if (copy_from_user(&r_w, rw, sizeof(r_w))) {
		set_fs(old_fs);
		return;
	}

	set_fs(old_fs);
	printk("l0: %08x l1: %08x l2: %08x l3: %08x "
	       "l4: %08x l5: %08x l6: %08x l7: %08x\n",
	       r_w.locals[0], r_w.locals[1], r_w.locals[2], r_w.locals[3],
	       r_w.locals[4], r_w.locals[5], r_w.locals[6], r_w.locals[7]);
	printk("i0: %08x i1: %08x i2: %08x i3: %08x "
	       "i4: %08x i5: %08x i6: %08x i7: %08x\n",
	       r_w.ins[0], r_w.ins[1], r_w.ins[2], r_w.ins[3],
	       r_w.ins[4], r_w.ins[5], r_w.ins[6], r_w.ins[7]);
}
#else
#define show_regwindow32(regs)	do { } while (0)
#endif

static void show_regwindow(struct pt_regs *regs)
{
	struct reg_window __user *rw;
	struct reg_window *rwk;
	struct reg_window r_w;
	mm_segment_t old_fs;

	if ((regs->tstate & TSTATE_PRIV) || !(test_thread_flag(TIF_32BIT))) {
		__asm__ __volatile__ ("flushw");
		rw = (struct reg_window __user *)
			(regs->u_regs[14] + STACK_BIAS);
		rwk = (struct reg_window *)
			(regs->u_regs[14] + STACK_BIAS);
		if (!(regs->tstate & TSTATE_PRIV)) {
			old_fs = get_fs();
			set_fs(USER_DS);
			if (copy_from_user(&r_w, rw, sizeof(r_w))) {
				set_fs(old_fs);
				return;
			}
			rwk = &r_w;
			set_fs(old_fs);
		}
	} else {
		show_regwindow32(regs);
		return;
	}
	printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n",
	       rwk->locals[0], rwk->locals[1], rwk->locals[2], rwk->locals[3]);
	printk("l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
	       rwk->locals[4], rwk->locals[5], rwk->locals[6], rwk->locals[7]);
	printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n",
	       rwk->ins[0], rwk->ins[1], rwk->ins[2], rwk->ins[3]);
	printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
	       rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
	if (regs->tstate & TSTATE_PRIV)
		printk("I7: <%pS>\n", (void *) rwk->ins[7]);
}

void show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);

	printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
	       regs->tpc, regs->tnpc, regs->y, print_tainted());
	printk("TPC: <%pS>\n", (void *) regs->tpc);
	printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
	       regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
	       regs->u_regs[3]);
	printk("g4: %016lx g5: %016lx g6: %016lx g7: %016lx\n",
	       regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
	       regs->u_regs[7]);
	printk("o0: %016lx o1: %016lx o2: %016lx o3: %016lx\n",
	       regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
	       regs->u_regs[11]);
	printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
	       regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
	       regs->u_regs[15]);
	printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
	show_regwindow(regs);
	show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
}
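/* Scratch area used to collect per-cpu register or PMU state when a
 * global dump is requested (sysrq 'y'/'x', NMI backtrace).  Access is
 * serialized by global_cpu_snapshot_lock.
 */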
union global_cpu_snapshot global_cpu_snapshot[NR_CPUS];
static DEFINE_SPINLOCK(global_cpu_snapshot_lock);

static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
			      int this_cpu)
{
	struct global_reg_snapshot *rp;

	flushw_all();

	rp = &global_cpu_snapshot[this_cpu].reg;

	rp->tstate = regs->tstate;
	rp->tpc = regs->tpc;
	rp->tnpc = regs->tnpc;
	rp->o7 = regs->u_regs[UREG_I7];

	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *rw;

		rw = (struct reg_window *)
			(regs->u_regs[UREG_FP] + STACK_BIAS);
		if (kstack_valid(tp, (unsigned long) rw)) {
			rp->i7 = rw->ins[7];
			rw = (struct reg_window *)
				(rw->ins[6] + STACK_BIAS);
			if (kstack_valid(tp, (unsigned long) rw))
				rp->rpc = rw->ins[7];
		}
	} else {
		rp->i7 = 0;
		rp->rpc = 0;
	}
	rp->thread = tp;
}

/* In order to avoid hangs we do not try to synchronize with the
 * global register dump client cpus.  The last store they make is to
 * the thread pointer, so do a short poll waiting for that to become
 * non-NULL.
 */
static void __global_reg_poll(struct global_reg_snapshot *gp)
{
	int limit = 0;

	while (!gp->thread && ++limit < 100) {
		barrier();
		udelay(1);
	}
}
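/* Snapshot and print the register state of the cpus in "mask";
 * remote cpus deposit their snapshots via smp_fetch_global_regs().
 */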
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	struct thread_info *tp = current_thread_info();
	struct pt_regs *regs = get_irq_regs();
	unsigned long flags;
	int this_cpu, cpu;

	if (!regs)
		regs = tp->kregs;

	spin_lock_irqsave(&global_cpu_snapshot_lock, flags);

	this_cpu = raw_smp_processor_id();

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
		__global_reg_self(tp, regs, this_cpu);

	smp_fetch_global_regs();

	for_each_cpu(cpu, mask) {
		struct global_reg_snapshot *gp;

		if (exclude_self && cpu == this_cpu)
			continue;

		gp = &global_cpu_snapshot[cpu].reg;

		__global_reg_poll(gp);

		tp = gp->thread;
		printk("%c CPU[%3d]: TSTATE[%016lx] TPC[%016lx] TNPC[%016lx] TASK[%s:%d]\n",
		       (cpu == this_cpu ? '*' : ' '), cpu,
		       gp->tstate, gp->tpc, gp->tnpc,
		       ((tp && tp->task) ? tp->task->comm : "NULL"),
		       ((tp && tp->task) ? tp->task->pid : -1));

		if (gp->tstate & TSTATE_PRIV) {
			printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
			       (void *) gp->tpc,
			       (void *) gp->o7,
			       (void *) gp->i7,
			       (void *) gp->rpc);
		} else {
			printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
			       gp->tpc, gp->o7, gp->i7, gp->rpc);
		}

		touch_nmi_watchdog();
	}

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
}

#ifdef CONFIG_MAGIC_SYSRQ

static void sysrq_handle_globreg(int key)
{
	trigger_all_cpu_backtrace();
}

static struct sysrq_key_op sparc_globalreg_op = {
	.handler	= sysrq_handle_globreg,
	.help_msg	= "global-regs(y)",
	.action_msg	= "Show Global CPU Regs",
};

static void __global_pmu_self(int this_cpu)
{
	struct global_pmu_snapshot *pp;
	int i, num;

	if (!pcr_ops)
		return;

	pp = &global_cpu_snapshot[this_cpu].pmu;

	num = 1;
	if (tlb_type == hypervisor &&
	    sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
		num = 4;

	for (i = 0; i < num; i++) {
		pp->pcr[i] = pcr_ops->read_pcr(i);
		pp->pic[i] = pcr_ops->read_pic(i);
	}
}

static void __global_pmu_poll(struct global_pmu_snapshot *pp)
{
	int limit = 0;

	while (!pp->pcr[0] && ++limit < 100) {
		barrier();
		udelay(1);
	}
}

static void pmu_snapshot_all_cpus(void)
{
	unsigned long flags;
	int this_cpu, cpu;

	spin_lock_irqsave(&global_cpu_snapshot_lock, flags);

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	this_cpu = raw_smp_processor_id();

	__global_pmu_self(this_cpu);

	smp_fetch_global_pmu();

	for_each_online_cpu(cpu) {
		struct global_pmu_snapshot *pp = &global_cpu_snapshot[cpu].pmu;

		__global_pmu_poll(pp);

		printk("%c CPU[%3d]: PCR[%08lx:%08lx:%08lx:%08lx] PIC[%08lx:%08lx:%08lx:%08lx]\n",
		       (cpu == this_cpu ? '*' : ' '), cpu,
		       pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
		       pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);

		touch_nmi_watchdog();
	}

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
}

static void sysrq_handle_globpmu(int key)
{
	pmu_snapshot_all_cpus();
}

static struct sysrq_key_op sparc_globalpmu_op = {
	.handler	= sysrq_handle_globpmu,
	.help_msg	= "global-pmu(x)",
	.action_msg	= "Show Global PMU Regs",
};

static int __init sparc_sysrq_init(void)
{
	int ret = register_sysrq_key('y', &sparc_globalreg_op);

	if (!ret)
		ret = register_sysrq_key('x', &sparc_globalpmu_op);
	return ret;
}

core_initcall(sparc_sysrq_init);

#endif

/* Free current thread data structures etc. */
void exit_thread(struct task_struct *tsk)
{
	struct thread_info *t = task_thread_info(tsk);

	if (t->utraps) {
		if (t->utraps[0] < 2)
			kfree(t->utraps);
		else
			t->utraps[0]--;
	}
}

void flush_thread(void)
{
	struct thread_info *t = current_thread_info();
	struct mm_struct *mm;

	mm = t->task->mm;
	if (mm)
		tsb_context_switch(mm);

	set_thread_wsaved(0);

	/* Clear FPU register state. */
	t->fpsaved[0] = 0;
}
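/* When clone()/fork() is given a new stack, the child needs a copy of
 * the parent's current stack frame so that the first window spill in
 * the child lands on valid memory.  clone_stackframe() below builds
 * that copy on the new stack.
 */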
/* It's a bit more tricky when 64-bit tasks are involved... */
static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
{
	bool stack_64bit = test_thread_64bit_stack(psp);
	unsigned long fp, distance, rval;

	if (stack_64bit) {
		csp += STACK_BIAS;
		psp += STACK_BIAS;
		__get_user(fp, &(((struct reg_window __user *)psp)->ins[6]));
		fp += STACK_BIAS;
		if (test_thread_flag(TIF_32BIT))
			fp &= 0xffffffff;
	} else
		__get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));

	/* Now align the stack as this is mandatory in the Sparc ABI
	 * due to how register windows work.  This hides the
	 * restriction from thread libraries etc.
	 */
	csp &= ~15UL;

	distance = fp - psp;
	rval = (csp - distance);
	if (copy_in_user((void __user *) rval, (void __user *) psp, distance))
		rval = 0;
	else if (!stack_64bit) {
		if (put_user(((u32)csp),
			     &(((struct reg_window32 __user *)rval)->ins[6])))
			rval = 0;
	} else {
		if (put_user(((u64)csp - STACK_BIAS),
			     &(((struct reg_window __user *)rval)->ins[6])))
			rval = 0;
		else
			rval = rval - STACK_BIAS;
	}

	return rval;
}

/* Standard stuff. */
static inline void shift_window_buffer(int first_win, int last_win,
				       struct thread_info *t)
{
	int i;

	for (i = first_win; i < last_win; i++) {
		t->rwbuf_stkptrs[i] = t->rwbuf_stkptrs[i+1];
		memcpy(&t->reg_window[i], &t->reg_window[i+1],
		       sizeof(struct reg_window));
	}
}

void synchronize_user_stack(void)
{
	struct thread_info *t = current_thread_info();
	unsigned long window;

	flush_user_windows();
	if ((window = get_thread_wsaved()) != 0) {
		window -= 1;
		do {
			struct reg_window *rwin = &t->reg_window[window];
			int winsize = sizeof(struct reg_window);
			unsigned long sp;

			sp = t->rwbuf_stkptrs[window];

			if (test_thread_64bit_stack(sp))
				sp += STACK_BIAS;
			else
				winsize = sizeof(struct reg_window32);

			if (!copy_to_user((char __user *)sp, rwin, winsize)) {
				shift_window_buffer(window, get_thread_wsaved() - 1, t);
				set_thread_wsaved(get_thread_wsaved() - 1);
			}
		} while (window--);
	}
}

static void stack_unaligned(unsigned long sp)
{
	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *) sp, 0);
}

static const char uwfault32[] = KERN_INFO \
	"%s[%d]: bad register window fault: SP %08lx (orig_sp %08lx) TPC %08lx O7 %08lx\n";
static const char uwfault64[] = KERN_INFO \
	"%s[%d]: bad register window fault: SP %016lx (orig_sp %016lx) TPC %08lx O7 %016lx\n";
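/* Write any register windows cached in the thread's save buffer back
 * to the user stack.  On failure, report the bad window fault
 * (ratelimited) and raise SIGSEGV, keeping the unwritten windows in
 * the buffer.
 */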
void fault_in_user_windows(struct pt_regs *regs)
{
	struct thread_info *t = current_thread_info();
	unsigned long window;

	flush_user_windows();
	window = get_thread_wsaved();

	if (likely(window != 0)) {
		window -= 1;
		do {
			struct reg_window *rwin = &t->reg_window[window];
			int winsize = sizeof(struct reg_window);
			unsigned long sp, orig_sp;

			orig_sp = sp = t->rwbuf_stkptrs[window];

			if (test_thread_64bit_stack(sp))
				sp += STACK_BIAS;
			else
				winsize = sizeof(struct reg_window32);

			if (unlikely(sp & 0x7UL))
				stack_unaligned(sp);

			if (unlikely(copy_to_user((char __user *)sp,
						  rwin, winsize))) {
				if (show_unhandled_signals)
					printk_ratelimited(is_compat_task() ?
							   uwfault32 : uwfault64,
							   current->comm, current->pid,
							   sp, orig_sp,
							   regs->tpc,
							   regs->u_regs[UREG_I7]);
				goto barf;
			}
		} while (window--);
	}
	set_thread_wsaved(0);
	return;

barf:
	set_thread_wsaved(window + 1);
	force_sig(SIGSEGV);
}

asmlinkage long sparc_do_fork(unsigned long clone_flags,
			      unsigned long stack_start,
			      struct pt_regs *regs,
			      unsigned long stack_size)
{
	int __user *parent_tid_ptr, *child_tid_ptr;
	unsigned long orig_i1 = regs->u_regs[UREG_I1];
	long ret;

#ifdef CONFIG_COMPAT
	if (test_thread_flag(TIF_32BIT)) {
		parent_tid_ptr = compat_ptr(regs->u_regs[UREG_I2]);
		child_tid_ptr = compat_ptr(regs->u_regs[UREG_I4]);
	} else
#endif
	{
		parent_tid_ptr = (int __user *) regs->u_regs[UREG_I2];
		child_tid_ptr = (int __user *) regs->u_regs[UREG_I4];
	}

	ret = do_fork(clone_flags, stack_start, stack_size,
		      parent_tid_ptr, child_tid_ptr);

	/* If we get an error and potentially restart the system
	 * call, we're screwed because copy_thread() clobbered
	 * the parent's %o1.  So detect that case and restore it
	 * here.
	 */
	if ((unsigned long)ret >= -ERESTART_RESTARTBLOCK)
		regs->u_regs[UREG_I1] = orig_i1;

	return ret;
}

/* Copy a Sparc thread.  The fork() return value conventions
 * under SunOS are nothing short of bletcherous:
 * Parent --> %o0 == child's pid, %o1 == 0
 * Child  --> %o0 == parent's pid, %o1 == 1
 */
int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p)
{
	struct thread_info *t = task_thread_info(p);
	struct pt_regs *regs = current_pt_regs();
	struct sparc_stackf *parent_sf;
	unsigned long child_stack_sz;
	char *child_trap_frame;

	/* Calculate offset to stack_frame & pt_regs */
	child_stack_sz = (STACKFRAME_SZ + TRACEREG_SZ);
	child_trap_frame = (task_stack_page(p) +
			    (THREAD_SIZE - child_stack_sz));

	t->new_child = 1;
	t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
	t->kregs = (struct pt_regs *) (child_trap_frame +
				       sizeof(struct sparc_stackf));
	t->fpsaved[0] = 0;

	if (unlikely(p->flags & PF_KTHREAD)) {
		memset(child_trap_frame, 0, child_stack_sz);
		__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
			(current_pt_regs()->tstate + 1) & TSTATE_CWP;
		t->current_ds = ASI_P;
		t->kregs->u_regs[UREG_G1] = sp; /* function */
		t->kregs->u_regs[UREG_G2] = arg;
		return 0;
	}

	parent_sf = ((struct sparc_stackf *) regs) - 1;
	memcpy(child_trap_frame, parent_sf, child_stack_sz);
	if (t->flags & _TIF_32BIT) {
		sp &= 0x00000000ffffffffUL;
		regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
	}
	t->kregs->u_regs[UREG_FP] = sp;
	__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
		(regs->tstate + 1) & TSTATE_CWP;
	t->current_ds = ASI_AIUS;
	if (sp != regs->u_regs[UREG_FP]) {
		unsigned long csp;

		csp = clone_stackframe(sp, regs->u_regs[UREG_FP]);
		if (!csp)
			return -EFAULT;
		t->kregs->u_regs[UREG_FP] = csp;
	}
	if (t->utraps)
		t->utraps[0]++;

	/* Set the return value for the child. */
	t->kregs->u_regs[UREG_I0] = current->pid;
	t->kregs->u_regs[UREG_I1] = 1;

	/* Set the second return value for the parent. */
	regs->u_regs[UREG_I1] = 0;

	if (clone_flags & CLONE_SETTLS)
		t->kregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3];

	return 0;
}

/* TIF_MCDPER in thread info flags for current task is updated lazily upon
 * a context switch.  Update this flag in current task's thread flags
 * before dup so the dup'd task will inherit the current TIF_MCDPER flag.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	if (adi_capable()) {
		register unsigned long tmp_mcdper;

		__asm__ __volatile__(
			".word 0x83438000\n\t"	/* rd %mcdper, %g1 */
			"mov %%g1, %0\n\t"
			: "=r" (tmp_mcdper)
			:
			: "g1");
		if (tmp_mcdper)
			set_thread_flag(TIF_MCDPER);
		else
			clear_thread_flag(TIF_MCDPER);
	}

	*dst = *src;
	return 0;
}
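/* Layout of the FPU state in a 32-bit (compat) ELF core dump; this
 * mirrors elf_fpregset_t as seen by sparc32 userland.
 */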
typedef struct {
	union {
		unsigned int pr_regs[32];
		unsigned long pr_dregs[16];
	} pr_fr;
	unsigned int __unused;
	unsigned int pr_fsr;
	unsigned char pr_qcnt;
	unsigned char pr_q_entrysize;
	unsigned char pr_en;
	unsigned int pr_q[64];
} elf_fpregset_t32;

/*
 * fill in the fpu structure for a core dump.
 */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
{
	unsigned long *kfpregs = current_thread_info()->fpregs;
	unsigned long fprs = current_thread_info()->fpsaved[0];

	if (test_thread_flag(TIF_32BIT)) {
		elf_fpregset_t32 *fpregs32 = (elf_fpregset_t32 *)fpregs;

		if (fprs & FPRS_DL)
			memcpy(&fpregs32->pr_fr.pr_regs[0], kfpregs,
			       sizeof(unsigned int) * 32);
		else
			memset(&fpregs32->pr_fr.pr_regs[0], 0,
			       sizeof(unsigned int) * 32);
		fpregs32->pr_qcnt = 0;
		fpregs32->pr_q_entrysize = 8;
		memset(&fpregs32->pr_q[0], 0,
		       (sizeof(unsigned int) * 64));
		if (fprs & FPRS_FEF) {
			fpregs32->pr_fsr = (unsigned int) current_thread_info()->xfsr[0];
			fpregs32->pr_en = 1;
		} else {
			fpregs32->pr_fsr = 0;
			fpregs32->pr_en = 0;
		}
	} else {
		if (fprs & FPRS_DL)
			memcpy(&fpregs->pr_regs[0], kfpregs,
			       sizeof(unsigned int) * 32);
		else
			memset(&fpregs->pr_regs[0], 0,
			       sizeof(unsigned int) * 32);
		if (fprs & FPRS_DU)
			memcpy(&fpregs->pr_regs[16], kfpregs+16,
			       sizeof(unsigned int) * 32);
		else
			memset(&fpregs->pr_regs[16], 0,
			       sizeof(unsigned int) * 32);
		if (fprs & FPRS_FEF) {
			fpregs->pr_fsr = current_thread_info()->xfsr[0];
			fpregs->pr_gsr = current_thread_info()->gsr[0];
		} else {
			fpregs->pr_fsr = fpregs->pr_gsr = 0;
		}
		fpregs->pr_fprs = fprs;
	}
	return 1;
}
EXPORT_SYMBOL(dump_fpu);

unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc, fp, bias = 0;
	struct thread_info *tp;
	struct reg_window *rw;
	unsigned long ret = 0;
	int count = 0;

	if (!task || task == current ||
	    task->state == TASK_RUNNING)
		goto out;

	tp = task_thread_info(task);
	bias = STACK_BIAS;
	fp = task_thread_info(task)->ksp + bias;

	do {
		if (!kstack_valid(tp, fp))
			break;
		rw = (struct reg_window *) fp;
		pc = rw->ins[7];
		if (!in_sched_functions(pc)) {
			ret = pc;
			goto out;
		}
		fp = rw->ins[6] + bias;
	} while (++count < 16);

out:
	return ret;
}