/* arch/sparc64/kernel/process.c
 *
 * Copyright (C) 1995, 1996, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997, 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <stdarg.h>

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/compat.h>
#include <linux/tick.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/perf_event.h>
#include <linux/elfcore.h>
#include <linux/sysrq.h>
#include <linux/nmi.h>

#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/pstate.h>
#include <asm/elf.h>
#include <asm/fpumacro.h>
#include <asm/head.h>
#include <asm/cpudata.h>
#include <asm/mmu_context.h>
#include <asm/unistd.h>
#include <asm/hypervisor.h>
#include <asm/syscalls.h>
#include <asm/irq_regs.h>
#include <asm/smp.h>
#include <asm/pcr.h>

#include "kstack.h"

static void sparc64_yield(int cpu)
{
	if (tlb_type != hypervisor) {
		touch_nmi_watchdog();
		return;
	}

	clear_thread_flag(TIF_POLLING_NRFLAG);
	smp_mb__after_clear_bit();

	while (!need_resched() && !cpu_is_offline(cpu)) {
		unsigned long pstate;

		/* Disable interrupts. */
		__asm__ __volatile__(
			"rdpr %%pstate, %0\n\t"
			"andn %0, %1, %0\n\t"
			"wrpr %0, %%g0, %%pstate"
			: "=&r" (pstate)
			: "i" (PSTATE_IE));

		if (!need_resched() && !cpu_is_offline(cpu))
			sun4v_cpu_yield();

		/* Re-enable interrupts. */
		__asm__ __volatile__(
			"rdpr %%pstate, %0\n\t"
			"or %0, %1, %0\n\t"
			"wrpr %0, %%g0, %%pstate"
			: "=&r" (pstate)
			: "i" (PSTATE_IE));
	}

	set_thread_flag(TIF_POLLING_NRFLAG);
}
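/* A note on the wakeup handshake used above: while TIF_POLLING_NRFLAG
 * is set, a remote cpu waking us may simply set TIF_NEED_RESCHED and
 * skip the reschedule IPI, since a polling idle loop will notice the
 * flag by itself.  Once we park in sun4v_cpu_yield() we can no longer
 * poll, so the flag is cleared first and smp_mb__after_clear_bit()
 * orders that clear against the following need_resched() tests.
 */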
/* The idle loop on sparc64. */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	set_thread_flag(TIF_POLLING_NRFLAG);

	while (1) {
		tick_nohz_idle_enter();
		rcu_idle_enter();

		while (!need_resched() && !cpu_is_offline(cpu))
			sparc64_yield(cpu);

		rcu_idle_exit();
		tick_nohz_idle_exit();

#ifdef CONFIG_HOTPLUG_CPU
		if (cpu_is_offline(cpu)) {
			sched_preempt_enable_no_resched();
			cpu_play_dead();
		}
#endif
		schedule_preempt_disabled();
	}
}

#ifdef CONFIG_COMPAT
static void show_regwindow32(struct pt_regs *regs)
{
	struct reg_window32 __user *rw;
	struct reg_window32 r_w;
	mm_segment_t old_fs;

	__asm__ __volatile__ ("flushw");
	rw = compat_ptr((unsigned)regs->u_regs[14]);
	old_fs = get_fs();
	set_fs(USER_DS);
	if (copy_from_user(&r_w, rw, sizeof(r_w))) {
		set_fs(old_fs);
		return;
	}

	set_fs(old_fs);
	printk("l0: %08x l1: %08x l2: %08x l3: %08x "
	       "l4: %08x l5: %08x l6: %08x l7: %08x\n",
	       r_w.locals[0], r_w.locals[1], r_w.locals[2], r_w.locals[3],
	       r_w.locals[4], r_w.locals[5], r_w.locals[6], r_w.locals[7]);
	printk("i0: %08x i1: %08x i2: %08x i3: %08x "
	       "i4: %08x i5: %08x i6: %08x i7: %08x\n",
	       r_w.ins[0], r_w.ins[1], r_w.ins[2], r_w.ins[3],
	       r_w.ins[4], r_w.ins[5], r_w.ins[6], r_w.ins[7]);
}
#else
#define show_regwindow32(regs)	do { } while (0)
#endif
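/* Dump the register window the interrupted code was using.  On a
 * 64-bit stack the window lives at %sp + STACK_BIAS (the V9 ABI
 * biases the stack pointer by 2047); since the bias is odd, a biased
 * 64-bit %sp can be told from an unbiased 32-bit one by its low bit,
 * which is effectively what test_thread_64bit_stack() keys off of.
 * 32-bit frames are handled by the compat variant above.
 */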
static void show_regwindow(struct pt_regs *regs)
{
	struct reg_window __user *rw;
	struct reg_window *rwk;
	struct reg_window r_w;
	mm_segment_t old_fs;

	if ((regs->tstate & TSTATE_PRIV) || !(test_thread_flag(TIF_32BIT))) {
		__asm__ __volatile__ ("flushw");
		rw = (struct reg_window __user *)
			(regs->u_regs[14] + STACK_BIAS);
		rwk = (struct reg_window *)
			(regs->u_regs[14] + STACK_BIAS);
		if (!(regs->tstate & TSTATE_PRIV)) {
			old_fs = get_fs();
			set_fs(USER_DS);
			if (copy_from_user(&r_w, rw, sizeof(r_w))) {
				set_fs(old_fs);
				return;
			}
			rwk = &r_w;
			set_fs(old_fs);
		}
	} else {
		show_regwindow32(regs);
		return;
	}
	printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n",
	       rwk->locals[0], rwk->locals[1], rwk->locals[2], rwk->locals[3]);
	printk("l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
	       rwk->locals[4], rwk->locals[5], rwk->locals[6], rwk->locals[7]);
	printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n",
	       rwk->ins[0], rwk->ins[1], rwk->ins[2], rwk->ins[3]);
	printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
	       rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
	if (regs->tstate & TSTATE_PRIV)
		printk("I7: <%pS>\n", (void *) rwk->ins[7]);
}

void show_regs(struct pt_regs *regs)
{
	printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
	       regs->tpc, regs->tnpc, regs->y, print_tainted());
	printk("TPC: <%pS>\n", (void *) regs->tpc);
	printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
	       regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
	       regs->u_regs[3]);
	printk("g4: %016lx g5: %016lx g6: %016lx g7: %016lx\n",
	       regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
	       regs->u_regs[7]);
	printk("o0: %016lx o1: %016lx o2: %016lx o3: %016lx\n",
	       regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
	       regs->u_regs[11]);
	printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
	       regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
	       regs->u_regs[15]);
	printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
	show_regwindow(regs);
	show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
}

union global_cpu_snapshot global_cpu_snapshot[NR_CPUS];
static DEFINE_SPINLOCK(global_cpu_snapshot_lock);

static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
			      int this_cpu)
{
	struct global_reg_snapshot *rp;

	flushw_all();

	rp = &global_cpu_snapshot[this_cpu].reg;

	rp->tstate = regs->tstate;
	rp->tpc = regs->tpc;
	rp->tnpc = regs->tnpc;
	rp->o7 = regs->u_regs[UREG_I7];

	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *rw;

		rw = (struct reg_window *)
			(regs->u_regs[UREG_FP] + STACK_BIAS);
		if (kstack_valid(tp, (unsigned long) rw)) {
			rp->i7 = rw->ins[7];
			rw = (struct reg_window *)
				(rw->ins[6] + STACK_BIAS);
			if (kstack_valid(tp, (unsigned long) rw))
				rp->rpc = rw->ins[7];
		}
	} else {
		rp->i7 = 0;
		rp->rpc = 0;
	}
	rp->thread = tp;
}

/* In order to avoid hangs we do not try to synchronize with the
 * global register dump client cpus.  The last store they make is to
 * the thread pointer, so do a short poll waiting for that to become
 * non-NULL.
 */
static void __global_reg_poll(struct global_reg_snapshot *gp)
{
	int limit = 0;

	while (!gp->thread && ++limit < 100) {
		barrier();
		udelay(1);
	}
}
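/* Capture and print the register state of every online cpu.  With
 * CONFIG_MAGIC_SYSRQ enabled this can be driven from userspace via
 * the 'y' sysrq key registered further down, e.g.:
 *
 *	# echo y > /proc/sysrq-trigger
 */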
void arch_trigger_all_cpu_backtrace(void)
{
	struct thread_info *tp = current_thread_info();
	struct pt_regs *regs = get_irq_regs();
	unsigned long flags;
	int this_cpu, cpu;

	if (!regs)
		regs = tp->kregs;

	spin_lock_irqsave(&global_cpu_snapshot_lock, flags);

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	this_cpu = raw_smp_processor_id();

	__global_reg_self(tp, regs, this_cpu);

	smp_fetch_global_regs();

	for_each_online_cpu(cpu) {
		struct global_reg_snapshot *gp = &global_cpu_snapshot[cpu].reg;

		__global_reg_poll(gp);

		tp = gp->thread;
		printk("%c CPU[%3d]: TSTATE[%016lx] TPC[%016lx] TNPC[%016lx] TASK[%s:%d]\n",
		       (cpu == this_cpu ? '*' : ' '), cpu,
		       gp->tstate, gp->tpc, gp->tnpc,
		       ((tp && tp->task) ? tp->task->comm : "NULL"),
		       ((tp && tp->task) ? tp->task->pid : -1));

		if (gp->tstate & TSTATE_PRIV) {
			printk("             TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
			       (void *) gp->tpc,
			       (void *) gp->o7,
			       (void *) gp->i7,
			       (void *) gp->rpc);
		} else {
			printk("             TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
			       gp->tpc, gp->o7, gp->i7, gp->rpc);
		}
	}

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
}

#ifdef CONFIG_MAGIC_SYSRQ

static void sysrq_handle_globreg(int key)
{
	arch_trigger_all_cpu_backtrace();
}

static struct sysrq_key_op sparc_globalreg_op = {
	.handler	= sysrq_handle_globreg,
	.help_msg	= "global-regs(Y)",
	.action_msg	= "Show Global CPU Regs",
};

static void __global_pmu_self(int this_cpu)
{
	struct global_pmu_snapshot *pp;
	int i, num;

	pp = &global_cpu_snapshot[this_cpu].pmu;

	num = 1;
	if (tlb_type == hypervisor &&
	    sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
		num = 4;

	for (i = 0; i < num; i++) {
		pp->pcr[i] = pcr_ops->read_pcr(i);
		pp->pic[i] = pcr_ops->read_pic(i);
	}
}

static void __global_pmu_poll(struct global_pmu_snapshot *pp)
{
	int limit = 0;

	while (!pp->pcr[0] && ++limit < 100) {
		barrier();
		udelay(1);
	}
}

static void pmu_snapshot_all_cpus(void)
{
	unsigned long flags;
	int this_cpu, cpu;

	spin_lock_irqsave(&global_cpu_snapshot_lock, flags);

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	this_cpu = raw_smp_processor_id();

	__global_pmu_self(this_cpu);

	smp_fetch_global_pmu();

	for_each_online_cpu(cpu) {
		struct global_pmu_snapshot *pp = &global_cpu_snapshot[cpu].pmu;

		__global_pmu_poll(pp);

		printk("%c CPU[%3d]: PCR[%08lx:%08lx:%08lx:%08lx] PIC[%08lx:%08lx:%08lx:%08lx]\n",
		       (cpu == this_cpu ? '*' : ' '), cpu,
		       pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
		       pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);
	}

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
}

static void sysrq_handle_globpmu(int key)
{
	pmu_snapshot_all_cpus();
}

static struct sysrq_key_op sparc_globalpmu_op = {
	.handler	= sysrq_handle_globpmu,
	.help_msg	= "global-pmu(X)",
	.action_msg	= "Show Global PMU Regs",
};

static int __init sparc_sysrq_init(void)
{
	int ret = register_sysrq_key('y', &sparc_globalreg_op);

	if (!ret)
		ret = register_sysrq_key('x', &sparc_globalpmu_op);
	return ret;
}

core_initcall(sparc_sysrq_init);

#endif

/* Return the saved PC of a sleeping task: follow the task's saved
 * kernel stack pointer to the topmost register window, then one frame
 * up via the saved frame pointer, and return the %i7 return address
 * stored there; 0xdeadbeef if the stack looks corrupt.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	unsigned long ret = 0xdeadbeefUL;

	if (ti && ti->ksp) {
		unsigned long *sp;
		sp = (unsigned long *)(ti->ksp + STACK_BIAS);
		if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL &&
		    sp[14]) {
			unsigned long *fp;
			fp = (unsigned long *)(sp[14] + STACK_BIAS);
			if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL)
				ret = fp[15];
		}
	}
	return ret;
}
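/* t->utraps[0] holds a reference count on the user trap table shared
 * by clone()d threads: copy_thread() below takes a reference, and
 * exit_thread() drops it, freeing the table only when the last user
 * exits.
 */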
/* Free current thread data structures etc. */
void exit_thread(void)
{
	struct thread_info *t = current_thread_info();

	if (t->utraps) {
		if (t->utraps[0] < 2)
			kfree(t->utraps);
		else
			t->utraps[0]--;
	}
}

void flush_thread(void)
{
	struct thread_info *t = current_thread_info();
	struct mm_struct *mm;

	mm = t->task->mm;
	if (mm)
		tsb_context_switch(mm);

	set_thread_wsaved(0);

	/* Clear FPU register state. */
	t->fpsaved[0] = 0;
}

/* It's a bit more tricky when 64-bit tasks are involved... */
static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
{
	bool stack_64bit = test_thread_64bit_stack(psp);
	unsigned long fp, distance, rval;

	if (stack_64bit) {
		csp += STACK_BIAS;
		psp += STACK_BIAS;
		__get_user(fp, &(((struct reg_window __user *)psp)->ins[6]));
		fp += STACK_BIAS;
		if (test_thread_flag(TIF_32BIT))
			fp &= 0xffffffff;
	} else
		__get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));

	/* Now align the stack as this is mandatory in the Sparc ABI
	 * due to how register windows work.  This hides the
	 * restriction from thread libraries etc.
	 */
	csp &= ~15UL;

	distance = fp - psp;
	rval = (csp - distance);
	if (copy_in_user((void __user *) rval, (void __user *) psp, distance))
		rval = 0;
	else if (!stack_64bit) {
		if (put_user(((u32)csp),
			     &(((struct reg_window32 __user *)rval)->ins[6])))
			rval = 0;
	} else {
		if (put_user(((u64)csp - STACK_BIAS),
			     &(((struct reg_window __user *)rval)->ins[6])))
			rval = 0;
		else
			rval = rval - STACK_BIAS;
	}

	return rval;
}

/* Standard stuff. */
static inline void shift_window_buffer(int first_win, int last_win,
				       struct thread_info *t)
{
	int i;

	for (i = first_win; i < last_win; i++) {
		t->rwbuf_stkptrs[i] = t->rwbuf_stkptrs[i+1];
		memcpy(&t->reg_window[i], &t->reg_window[i+1],
		       sizeof(struct reg_window));
	}
}

void synchronize_user_stack(void)
{
	struct thread_info *t = current_thread_info();
	unsigned long window;

	flush_user_windows();
	if ((window = get_thread_wsaved()) != 0) {
		window -= 1;
		do {
			struct reg_window *rwin = &t->reg_window[window];
			int winsize = sizeof(struct reg_window);
			unsigned long sp;

			sp = t->rwbuf_stkptrs[window];

			if (test_thread_64bit_stack(sp))
				sp += STACK_BIAS;
			else
				winsize = sizeof(struct reg_window32);

			if (!copy_to_user((char __user *)sp, rwin, winsize)) {
				shift_window_buffer(window, get_thread_wsaved() - 1, t);
				set_thread_wsaved(get_thread_wsaved() - 1);
			}
		} while (window--);
	}
}

static void stack_unaligned(unsigned long sp)
{
	siginfo_t info;

	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
	info.si_addr = (void __user *) sp;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}
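/* Write the user register windows cached in thread_info back to the
 * user stack on the way out to user space.  copy_to_user() faults the
 * stack pages in as a side effect; if the stack is unwritable even
 * then, the task is beyond saving and gets SIGILL.
 */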
void fault_in_user_windows(void)
{
	struct thread_info *t = current_thread_info();
	unsigned long window;

	flush_user_windows();
	window = get_thread_wsaved();

	if (likely(window != 0)) {
		window -= 1;
		do {
			struct reg_window *rwin = &t->reg_window[window];
			int winsize = sizeof(struct reg_window);
			unsigned long sp;

			sp = t->rwbuf_stkptrs[window];

			if (test_thread_64bit_stack(sp))
				sp += STACK_BIAS;
			else
				winsize = sizeof(struct reg_window32);

			if (unlikely(sp & 0x7UL))
				stack_unaligned(sp);

			if (unlikely(copy_to_user((char __user *)sp,
						  rwin, winsize)))
				goto barf;
		} while (window--);
	}
	set_thread_wsaved(0);
	return;

barf:
	set_thread_wsaved(window + 1);
	do_exit(SIGILL);
}

asmlinkage long sparc_do_fork(unsigned long clone_flags,
			      unsigned long stack_start,
			      struct pt_regs *regs,
			      unsigned long stack_size)
{
	int __user *parent_tid_ptr, *child_tid_ptr;
	unsigned long orig_i1 = regs->u_regs[UREG_I1];
	long ret;

#ifdef CONFIG_COMPAT
	if (test_thread_flag(TIF_32BIT)) {
		parent_tid_ptr = compat_ptr(regs->u_regs[UREG_I2]);
		child_tid_ptr = compat_ptr(regs->u_regs[UREG_I4]);
	} else
#endif
	{
		parent_tid_ptr = (int __user *) regs->u_regs[UREG_I2];
		child_tid_ptr = (int __user *) regs->u_regs[UREG_I4];
	}

	ret = do_fork(clone_flags, stack_start, stack_size,
		      parent_tid_ptr, child_tid_ptr);

	/* If we get an error and potentially restart the system
	 * call, we're screwed because copy_thread() clobbered
	 * the parent's %o1.  So detect that case and restore it
	 * here.
	 */
	if ((unsigned long)ret >= -ERESTART_RESTARTBLOCK)
		regs->u_regs[UREG_I1] = orig_i1;

	return ret;
}

/* Copy a Sparc thread.  The fork() return value conventions
 * under SunOS are nothing short of bletcherous:
 * Parent -->  %o0 == child's pid, %o1 == 0
 * Child  -->  %o0 == parent's pid, %o1 == 1
 */
int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p)
{
	struct thread_info *t = task_thread_info(p);
	struct pt_regs *regs = current_pt_regs();
	struct sparc_stackf *parent_sf;
	unsigned long child_stack_sz;
	char *child_trap_frame;

	/* Calculate offset to stack_frame & pt_regs */
	child_stack_sz = (STACKFRAME_SZ + TRACEREG_SZ);
	child_trap_frame = (task_stack_page(p) +
			    (THREAD_SIZE - child_stack_sz));

	t->new_child = 1;
	t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
	t->kregs = (struct pt_regs *) (child_trap_frame +
				       sizeof(struct sparc_stackf));
	t->fpsaved[0] = 0;

	if (unlikely(p->flags & PF_KTHREAD)) {
		memset(child_trap_frame, 0, child_stack_sz);
		__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
			(current_pt_regs()->tstate + 1) & TSTATE_CWP;
		t->current_ds = ASI_P;
		t->kregs->u_regs[UREG_G1] = sp; /* function */
		t->kregs->u_regs[UREG_G2] = arg;
		return 0;
	}

	parent_sf = ((struct sparc_stackf *) regs) - 1;
	memcpy(child_trap_frame, parent_sf, child_stack_sz);
	if (t->flags & _TIF_32BIT) {
		sp &= 0x00000000ffffffffUL;
		regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
	}
	t->kregs->u_regs[UREG_FP] = sp;
	__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
		(regs->tstate + 1) & TSTATE_CWP;
	t->current_ds = ASI_AIUS;
	if (sp != regs->u_regs[UREG_FP]) {
		unsigned long csp;

		csp = clone_stackframe(sp, regs->u_regs[UREG_FP]);
		if (!csp)
			return -EFAULT;
		t->kregs->u_regs[UREG_FP] = csp;
	}
	if (t->utraps)
		t->utraps[0]++;

	/* Set the return value for the child. */
	t->kregs->u_regs[UREG_I0] = current->pid;
	t->kregs->u_regs[UREG_I1] = 1;

	/* Set the second return value for the parent. */
	regs->u_regs[UREG_I1] = 0;

	if (clone_flags & CLONE_SETTLS)
		t->kregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3];

	return 0;
}
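/* 32-bit view of the fpu state written into ELF core dumps for compat
 * tasks.  The layout matches the sparc32 elf_fpregset_t, including
 * the fp queue, which is always reported empty here.
 */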
typedef struct {
	union {
		unsigned int	pr_regs[32];
		unsigned long	pr_dregs[16];
	} pr_fr;
	unsigned int	__unused;
	unsigned int	pr_fsr;
	unsigned char	pr_qcnt;
	unsigned char	pr_q_entrysize;
	unsigned char	pr_en;
	unsigned int	pr_q[64];
} elf_fpregset_t32;

/*
 * Fill in the fpu structure for a core dump.
 */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
{
	unsigned long *kfpregs = current_thread_info()->fpregs;
	unsigned long fprs = current_thread_info()->fpsaved[0];

	if (test_thread_flag(TIF_32BIT)) {
		elf_fpregset_t32 *fpregs32 = (elf_fpregset_t32 *)fpregs;

		if (fprs & FPRS_DL)
			memcpy(&fpregs32->pr_fr.pr_regs[0], kfpregs,
			       sizeof(unsigned int) * 32);
		else
			memset(&fpregs32->pr_fr.pr_regs[0], 0,
			       sizeof(unsigned int) * 32);
		fpregs32->pr_qcnt = 0;
		fpregs32->pr_q_entrysize = 8;
		memset(&fpregs32->pr_q[0], 0,
		       (sizeof(unsigned int) * 64));
		if (fprs & FPRS_FEF) {
			fpregs32->pr_fsr = (unsigned int) current_thread_info()->xfsr[0];
			fpregs32->pr_en = 1;
		} else {
			fpregs32->pr_fsr = 0;
			fpregs32->pr_en = 0;
		}
	} else {
		if (fprs & FPRS_DL)
			memcpy(&fpregs->pr_regs[0], kfpregs,
			       sizeof(unsigned int) * 32);
		else
			memset(&fpregs->pr_regs[0], 0,
			       sizeof(unsigned int) * 32);
		if (fprs & FPRS_DU)
			memcpy(&fpregs->pr_regs[16], kfpregs+16,
			       sizeof(unsigned int) * 32);
		else
			memset(&fpregs->pr_regs[16], 0,
			       sizeof(unsigned int) * 32);
		if (fprs & FPRS_FEF) {
			fpregs->pr_fsr = current_thread_info()->xfsr[0];
			fpregs->pr_gsr = current_thread_info()->gsr[0];
		} else {
			fpregs->pr_fsr = fpregs->pr_gsr = 0;
		}
		fpregs->pr_fprs = fprs;
	}
	return 1;
}
EXPORT_SYMBOL(dump_fpu);

/* Walk the saved register windows of a sleeping task's kernel stack
 * and return the first return address found outside the scheduler,
 * i.e. the "wait channel" the task is blocked at.
 */
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc, fp, bias = 0;
	struct thread_info *tp;
	struct reg_window *rw;
	unsigned long ret = 0;
	int count = 0;

	if (!task || task == current ||
	    task->state == TASK_RUNNING)
		goto out;

	tp = task_thread_info(task);
	bias = STACK_BIAS;
	fp = task_thread_info(task)->ksp + bias;

	do {
		if (!kstack_valid(tp, fp))
			break;
		rw = (struct reg_window *) fp;
		pc = rw->ins[7];
		if (!in_sched_functions(pc)) {
			ret = pc;
			goto out;
		}
		fp = rw->ins[6] + bias;
	} while (++count < 16);

out:
	return ret;
}
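/* get_wchan() is what backs /proc/<pid>/wchan (and the wchan field of
 * /proc/<pid>/stat), so the result can be inspected from userspace,
 * e.g. (output depends on where the task happens to sleep):
 *
 *	$ cat /proc/1/wchan
 *	poll_schedule_timeout
 */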