/*
 * Derived from "arch/i386/kernel/process.c"
 * Copyright (C) 1995 Linus Torvalds
 *
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 * Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#include <linux/kprobes.h>
#include <linux/kdebug.h>

/* Transactional Memory debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while (0)
#endif

extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                /*
                 * We need to disable preemption here because if we didn't,
                 * another process could get scheduled after the regs->msr
                 * test but before we have finished saving the FP registers
                 * to the thread_struct.  That process could take over the
                 * FPU, and then when we get scheduled again we would store
                 * bogus values for the remaining FP registers.
                 */
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
                        /*
                         * This should only ever be called for current or
                         * for a stopped child process.  Since we save away
                         * the FP register state on context switch on SMP,
                         * there is something wrong if a stopped child appears
                         * to still have its FP state in the CPU registers.
                         */
                        BUG_ON(tsk != current);
#endif
                        giveup_fpu(tsk);
                }
                preempt_enable();
        }
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);
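
/*
 * Illustrative usage (not new code): the ptrace paths call this before
 * reading a stopped child's FP state, roughly:
 *
 *      flush_fp_to_thread(child);
 *      ... copy child->thread.fpr[] out to the tracer ...
 */
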
void enable_kernel_fp(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
                giveup_fpu(current);
        else
                giveup_fpu(NULL);       /* just enables FP for kernel */
#else
        giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);

#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
                giveup_altivec(current);
        else
                giveup_altivec_notask();
#else
        giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
#endif
                        giveup_altivec(tsk);
                }
                preempt_enable();
        }
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
#if 0
/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
                giveup_vsx(current);
        else
                giveup_vsx(NULL);       /* just enable VSX for kernel - force */
#else
        giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
#endif

void giveup_vsx(struct task_struct *tsk)
{
        giveup_fpu(tsk);
        giveup_altivec(tsk);
        __giveup_vsx(tsk);
}

void flush_vsx_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
#endif
                        giveup_vsx(tsk);
                }
                preempt_enable();
        }
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE

void enable_kernel_spe(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
                giveup_spe(current);
        else
                giveup_spe(NULL);       /* just enable SPE for kernel - force */
#else
        giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
#endif
                        tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
                        giveup_spe(tsk);
                }
                preempt_enable();
        }
}
#endif /* CONFIG_SPE */
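
/*
 * A sketch of the UP lazy-switch protocol behind the last_task_used_*
 * pointers below: the owner's state is simply left in the unit at
 * context switch; when another task first touches, say, the FPU it
 * takes an FP-unavailable trap and the low-level handler (load_up_fpu
 * and friends) spills the old owner's registers, loads the new task's
 * state and updates last_task_used_math.
 */
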
#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
        preempt_disable();
        if (last_task_used_math == current)
                last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
        if (last_task_used_altivec == current)
                last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
        if (last_task_used_vsx == current)
                last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        if (last_task_used_spe == current)
                last_task_used_spe = NULL;
#endif
        preempt_enable();
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
                  unsigned long error_code, int signal_code, int breakpt)
{
        siginfo_t info;

        current->thread.trap_nr = signal_code;
        if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
                        11, SIGSEGV) == NOTIFY_STOP)
                return;

        /* Deliver the signal to userspace */
        info.si_signo = SIGTRAP;
        info.si_errno = breakpt;        /* breakpoint or watchpoint id */
        info.si_code = signal_code;
        info.si_addr = (void __user *)address;
        force_sig_info(SIGTRAP, &info, current);
}
#else   /* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_break(struct pt_regs *regs, unsigned long address,
              unsigned long error_code)
{
        siginfo_t info;

        current->thread.trap_nr = TRAP_HWBKPT;
        if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
                        11, SIGSEGV) == NOTIFY_STOP)
                return;

        if (debugger_break_match(regs))
                return;

        /* Clear the breakpoint */
        hw_breakpoint_disable();

        /* Deliver the signal to userspace */
        info.si_signo = SIGTRAP;
        info.si_errno = 0;
        info.si_code = TRAP_HWBKPT;
        info.si_addr = (void __user *)address;
        force_sig_info(SIGTRAP, &info, current);
}
#endif  /* CONFIG_PPC_ADV_DEBUG_REGS */
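
/*
 * For illustration: a debugger tracing the task would observe a SIGTRAP
 * whose siginfo carries si_code = TRAP_HWBKPT (or the signal_code passed
 * to do_send_trap() above) and si_addr set to the address that matched
 * the breakpoint.
 */
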
static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
        thread->iac1 = thread->iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
        thread->iac3 = thread->iac4 = 0;
#endif
        thread->dac1 = thread->dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
        thread->dvc1 = thread->dvc2 = 0;
#endif
        thread->dbcr0 = 0;
#ifdef CONFIG_BOOKE
        /*
         * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
         */
        thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
                        DBCR1_IAC3US | DBCR1_IAC4US;
        /*
         * Force Data Address Compare User/Supervisor bits to be User-only
         * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
         */
        thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
        thread->dbcr1 = 0;
#endif
}

static void prime_debug_regs(struct thread_struct *thread)
{
        mtspr(SPRN_IAC1, thread->iac1);
        mtspr(SPRN_IAC2, thread->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
        mtspr(SPRN_IAC3, thread->iac3);
        mtspr(SPRN_IAC4, thread->iac4);
#endif
        mtspr(SPRN_DAC1, thread->dac1);
        mtspr(SPRN_DAC2, thread->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
        mtspr(SPRN_DVC1, thread->dvc1);
        mtspr(SPRN_DVC2, thread->dvc2);
#endif
        mtspr(SPRN_DBCR0, thread->dbcr0);
        mtspr(SPRN_DBCR1, thread->dbcr1);
#ifdef CONFIG_BOOKE
        mtspr(SPRN_DBCR2, thread->dbcr2);
#endif
}
/*
 * If either the old or the new thread is making use of the debug
 * registers, set the debug registers from the values stored in the
 * new thread.
 */
static void switch_booke_debug_regs(struct thread_struct *new_thread)
{
        if ((current->thread.dbcr0 & DBCR0_IDM)
                || (new_thread->dbcr0 & DBCR0_IDM))
                        prime_debug_regs(new_thread);
}
#else   /* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
        thread->hw_brk.address = 0;
        thread->hw_brk.type = 0;
        set_breakpoint(&thread->hw_brk);
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif  /* CONFIG_PPC_ADV_DEBUG_REGS */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
        mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
        isync();
#endif
        return 0;
}
#elif defined(CONFIG_PPC_BOOK3S)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
        mtspr(SPRN_DABR, dabr);
        mtspr(SPRN_DABRX, dabrx);
        return 0;
}
#else
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
        return -EINVAL;
}
#endif

static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
        unsigned long dabr, dabrx;

        dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
        dabrx = ((brk->type >> 3) & 0x7);

        if (ppc_md.set_dabr)
                return ppc_md.set_dabr(dabr, dabrx);

        return __set_dabr(dabr, dabrx);
}

static inline int set_dawr(struct arch_hw_breakpoint *brk)
{
        unsigned long dawr, dawrx, mrd;

        dawr = brk->address;

        dawrx  = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE))
                                << (63 - 58);   /* read/write bits */
        dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2)
                                << (63 - 59);   /* translate */
        dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL))
                                >> 3;           /* privilege bits */
        /*
         * The DAWR length is stored in field MDR, bits 48:53.  It matches
         * a range in doublewords (64 bits), biased by -1, e.g. 0b000000
         * means 1 DW and 0b111111 means 64 DW.  brk->len is in bytes.
         * This aligns up to double word size, shifts and does the bias.
         */
        mrd = ((brk->len + 7) >> 3) - 1;
        dawrx |= (mrd & 0x3f) << (63 - 53);

        if (ppc_md.set_dawr)
                return ppc_md.set_dawr(dawr, dawrx);
        mtspr(SPRN_DAWR, dawr);
        mtspr(SPRN_DAWRX, dawrx);
        return 0;
}
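
/*
 * Worked example of the MRD encoding above: brk->len = 16 bytes gives
 * mrd = ((16 + 7) >> 3) - 1 = 1, i.e. a two-doubleword match range,
 * while any length of 1..8 bytes gives mrd = 0 (one doubleword).
 */
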
int set_breakpoint(struct arch_hw_breakpoint *brk)
{
        __get_cpu_var(current_brk) = *brk;

        if (cpu_has_feature(CPU_FTR_DAWR))
                return set_dawr(brk);

        return set_dabr(brk);
}

#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif

static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
                                struct arch_hw_breakpoint *b)
{
        if (a->address != b->address)
                return false;
        if (a->type != b->type)
                return false;
        if (a->len != b->len)
                return false;
        return true;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void tm_reclaim_task(struct task_struct *tsk)
{
        /* We have to work out if we're switching from/to a task that's in the
         * middle of a transaction.
         *
         * In switching we need to maintain a 2nd register state as
         * oldtask->thread.ckpt_regs.  We tm_reclaim(oldproc); this saves the
         * checkpointed (tbegin) state in ckpt_regs and saves the transactional
         * (current) FPRs into oldtask->thread.transact_fpr[].
         *
         * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
         */
        struct thread_struct *thr = &tsk->thread;

        if (!thr->regs)
                return;

        if (!MSR_TM_ACTIVE(thr->regs->msr))
                goto out_and_saveregs;

        /* Stash the original thread MSR, as giveup_fpu et al will
         * modify it.  We hold onto it to see whether the task used
         * FP & vector regs.
         */
        thr->tm_orig_msr = thr->regs->msr;

        TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
                 "ccr=%lx, msr=%lx, trap=%lx)\n",
                 tsk->pid, thr->regs->nip,
                 thr->regs->ccr, thr->regs->msr,
                 thr->regs->trap);

        tm_reclaim(thr, thr->regs->msr, TM_CAUSE_RESCHED);

        TM_DEBUG("--- tm_reclaim on pid %d complete\n",
                 tsk->pid);

out_and_saveregs:
        /* Always save the regs here, even if a transaction's not active.
         * This context-switches a thread's TM info SPRs.  We do it here to
         * be consistent with the restore path (in recheckpoint) which
         * cannot happen later in _switch().
         */
        tm_save_sprs(thr);
}
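
/*
 * Illustrative pairing of the two halves of a TM context switch
 * (assuming both tasks were mid-transaction):
 *
 *      __switch_to_tm(prev) -> tm_reclaim_task(prev):
 *              checkpointed state -> prev->thread.ckpt_regs,
 *              speculative FPRs -> prev->thread.transact_fpr[],
 *              TFHAR/TEXASR/TFIAR saved by tm_save_sprs().
 *      tm_recheckpoint_new_task(new) (below):
 *              tm_restore_sprs(), then tm_recheckpoint() reloads the
 *              checkpointed state and the speculative FP/VEC state.
 */
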
static inline void tm_recheckpoint_new_task(struct task_struct *new)
{
        unsigned long msr;

        if (!cpu_has_feature(CPU_FTR_TM))
                return;

        /* Recheckpoint the registers of the thread we're about to switch to.
         *
         * If the task was using FP, we non-lazily reload both the original and
         * the speculative FP register states.  This is because the kernel
         * doesn't see if/when a TM rollback occurs, so if we take an FP
         * unavailable exception later, we are unable to determine which set
         * of FP regs needs to be restored.
         */
        if (!new->thread.regs)
                return;

        /* The TM SPRs are restored here, so that TEXASR.FS can be set
         * before the trecheckpoint and no explosion occurs.
         */
        tm_restore_sprs(&new->thread);

        if (!MSR_TM_ACTIVE(new->thread.regs->msr))
                return;
        msr = new->thread.tm_orig_msr;
        /* Recheckpoint to restore original checkpointed register state. */
        TM_DEBUG("*** tm_recheckpoint of pid %d "
                 "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
                 new->pid, new->thread.regs->msr, msr);

        /* This loads the checkpointed FP/VEC state, if used */
        tm_recheckpoint(&new->thread, msr);

        /* This loads the speculative FP/VEC state, if used */
        if (msr & MSR_FP) {
                do_load_up_transact_fpu(&new->thread);
                new->thread.regs->msr |=
                        (MSR_FP | new->thread.fpexc_mode);
        }
#ifdef CONFIG_ALTIVEC
        if (msr & MSR_VEC) {
                do_load_up_transact_altivec(&new->thread);
                new->thread.regs->msr |= MSR_VEC;
        }
#endif
        /* We may as well turn on VSX too since all the state is restored now */
        if (msr & MSR_VSX)
                new->thread.regs->msr |= MSR_VSX;

        TM_DEBUG("*** tm_recheckpoint of pid %d complete "
                 "(kernel msr 0x%lx)\n",
                 new->pid, mfmsr());
}

static inline void __switch_to_tm(struct task_struct *prev)
{
        if (cpu_has_feature(CPU_FTR_TM)) {
                tm_enable();
                tm_reclaim_task(prev);
        }
}
#else
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

struct task_struct *__switch_to(struct task_struct *prev,
        struct task_struct *new)
{
        struct thread_struct *new_thread, *old_thread;
        unsigned long flags;
        struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
        struct ppc64_tlb_batch *batch;
#endif

        __switch_to_tm(prev);

#ifdef CONFIG_SMP
        /* avoid complexity of lazy save/restore of fpu
         * by just saving it every time we switch out if
         * this task used the fpu during the last quantum.
         *
         * If it tries to use the fpu again, it'll trap and
         * reload its fp regs.  So we don't have to do a restore
         * every switch, just a save.
         *  -- Cort
         */
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
                giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
        /*
         * If the previous thread used altivec in the last quantum
         * (thus changing altivec regs) then save them.
         * We used to check the VRSAVE register but not all apps
         * set it, so we don't rely on it now (and in fact we need
         * to save & restore VSCR even if VRSAVE == 0).  -- paulus
         *
         * On SMP we always save/restore altivec regs just to avoid the
         * complexity of changing processors.
         *  -- Cort
         */
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
                giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
                /* VMX and FPU registers are already saved here */
                __giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        /*
         * If the previous thread used spe in the last quantum
         * (thus changing spe regs) then save them.
         *
         * On SMP we always save/restore spe regs just to avoid the
         * complexity of changing processors.
         */
        if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
                giveup_spe(prev);
#endif /* CONFIG_SPE */

#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
        /* Avoid the trap.  On smp this never happens since
         * we don't set last_task_used_altivec -- Cort
         */
        if (new->thread.regs && last_task_used_altivec == new)
                new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
        if (new->thread.regs && last_task_used_vsx == new)
                new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        /* Avoid the trap.  On smp this never happens since
         * we don't set last_task_used_spe
         */
        if (new->thread.regs && last_task_used_spe == new)
                new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

#endif /* CONFIG_SMP */
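
        /*
         * Illustration of the UP trick above: if 'new' is still the owner
         * recorded in last_task_used_altivec, its live state never left the
         * unit, so setting MSR_VEC in its saved MSR lets it resume without
         * first taking an altivec-unavailable trap to reload state it
         * already has.
         */
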
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
        switch_booke_debug_regs(&new->thread);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
 * schedule DABR
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
        /* Reprogram the breakpoint only if the new thread's differs */
        if (unlikely(!hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk)))
                set_breakpoint(&new->thread.hw_brk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

        new_thread = &new->thread;
        old_thread = &current->thread;

#ifdef CONFIG_PPC64
        /*
         * Collect processor utilization data per process
         */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
                unsigned long start_tb, current_tb;
                start_tb = old_thread->start_tb;
                cu->current_tb = current_tb = mfspr(SPRN_PURR);
                old_thread->accum_tb += (current_tb - start_tb);
                new_thread->start_tb = current_tb;
        }
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_BOOK3S_64
        batch = &__get_cpu_var(ppc64_tlb_batch);
        if (batch->active) {
                current_thread_info()->local_flags |= _TLF_LAZY_MMU;
                if (batch->index)
                        __flush_tlb_pending(batch);
                batch->active = 0;
        }
#endif /* CONFIG_PPC_BOOK3S_64 */

        local_irq_save(flags);

        /*
         * We can't take a PMU exception inside _switch() since there is a
         * window where the kernel stack SLB and the kernel stack are out
         * of sync.  Hard disable here.
         */
        hard_irq_disable();

        tm_recheckpoint_new_task(new);

        last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_BOOK3S_64
        if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
                current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
                batch = &__get_cpu_var(ppc64_tlb_batch);
                batch->active = 1;
        }
#endif /* CONFIG_PPC_BOOK3S_64 */

        local_irq_restore(flags);

        return last;
}

static int instructions_to_print = 16;
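
/*
 * With the default of 16 above, show_instructions() begins the dump
 * 12 instructions (3/4 of the window) before regs->nip, so the faulting
 * instruction, printed in <angle brackets>, lands in the second row of
 * eight words.
 */
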
static void show_instructions(struct pt_regs *regs)
{
        int i;
        unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
                        sizeof(int));

        printk("Instruction dump:");

        for (i = 0; i < instructions_to_print; i++) {
                int instr;

                if (!(i % 8))
                        printk("\n");

#if !defined(CONFIG_BOOKE)
                /* If executing with the IMMU off, adjust pc rather
                 * than print XXXXXXXX.
                 */
                if (!(regs->msr & MSR_IR))
                        pc = (unsigned long)phys_to_virt(pc);
#endif

                /* We use __get_user here *only* to avoid an OOPS on a
                 * bad address because the pc *should* only be a
                 * kernel address.
                 */
                if (!__kernel_text_address(pc) ||
                     __get_user(instr, (unsigned int __user *)pc)) {
                        printk(KERN_CONT "XXXXXXXX ");
                } else {
                        if (regs->nip == pc)
                                printk(KERN_CONT "<%08x> ", instr);
                        else
                                printk(KERN_CONT "%08x ", instr);
                }

                pc += sizeof(int);
        }

        printk("\n");
}

static struct regbit {
        unsigned long bit;
        const char *name;
} msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
        {MSR_SF,        "SF"},
        {MSR_HV,        "HV"},
#endif
        {MSR_VEC,       "VEC"},
        {MSR_VSX,       "VSX"},
#ifdef CONFIG_BOOKE
        {MSR_CE,        "CE"},
#endif
        {MSR_EE,        "EE"},
        {MSR_PR,        "PR"},
        {MSR_FP,        "FP"},
        {MSR_ME,        "ME"},
#ifdef CONFIG_BOOKE
        {MSR_DE,        "DE"},
#else
        {MSR_SE,        "SE"},
        {MSR_BE,        "BE"},
#endif
        {MSR_IR,        "IR"},
        {MSR_DR,        "DR"},
        {MSR_PMM,       "PMM"},
#ifndef CONFIG_BOOKE
        {MSR_RI,        "RI"},
        {MSR_LE,        "LE"},
#endif
        {0,             NULL}
};

static void printbits(unsigned long val, struct regbit *bits)
{
        const char *sep = "";

        printk("<");
        for (; bits->bit; ++bits)
                if (val & bits->bit) {
                        printk("%s%s", sep, bits->name);
                        sep = ",";
                }
        printk(">");
}
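
/*
 * For example (hypothetical register image): on a 64-bit kernel,
 * printbits(0x8000000000001032, msr_bits) would print "<SF,ME,IR,DR,RI>",
 * those being the only table bits set in that value.
 */
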
#ifdef CONFIG_PPC64
#define REG             "%016lx"
#define REGS_PER_LINE   4
#define LAST_VOLATILE   13
#else
#define REG             "%08lx"
#define REGS_PER_LINE   8
#define LAST_VOLATILE   12
#endif

void show_regs(struct pt_regs *regs)
{
        int i, trap;

        printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
               regs->nip, regs->link, regs->ctr);
        printk("REGS: %p TRAP: %04lx %s (%s)\n",
               regs, regs->trap, print_tainted(), init_utsname()->release);
        printk("MSR: "REG" ", regs->msr);
        printbits(regs->msr, msr_bits);
        printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
#ifdef CONFIG_PPC64
        printk("SOFTE: %ld\n", regs->softe);
#endif
        trap = TRAP(regs);
        if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
                printk("CFAR: "REG"\n", regs->orig_gpr3);
        if (trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
                printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
#else
                printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
#endif
        printk("TASK = %p[%d] '%s' THREAD: %p",
               current, task_pid_nr(current), current->comm,
               task_thread_info(current));

#ifdef CONFIG_SMP
        printk(" CPU: %d", raw_smp_processor_id());
#endif /* CONFIG_SMP */

        for (i = 0; i < 32; i++) {
                if ((i % REGS_PER_LINE) == 0)
                        printk("\nGPR%02d: ", i);
                printk(REG " ", regs->gpr[i]);
                if (i == LAST_VOLATILE && !FULL_REGS(regs))
                        break;
        }
        printk("\n");
#ifdef CONFIG_KALLSYMS
        /*
         * Lookup NIP late so we have the best chance of getting the
         * above info out without failing
         */
        printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
        printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        printk("PACATMSCRATCH [%llx]\n", get_paca()->tm_scratch);
#endif
        show_stack(current, (unsigned long *) regs->gpr[1]);
        if (!user_mode(regs))
                show_instructions(regs);
}

void exit_thread(void)
{
        discard_lazy_cpu_state();
}

void flush_thread(void)
{
        discard_lazy_cpu_state();

#ifdef CONFIG_HAVE_HW_BREAKPOINT
        flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
        set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

void
release_thread(struct task_struct *t)
{
}

/*
 * This gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        flush_fp_to_thread(src);
        flush_altivec_to_thread(src);
        flush_vsx_to_thread(src);
        flush_spe_to_thread(src);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        flush_ptrace_hw_breakpoint(src);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

        *dst = *src;
        return 0;
}

/*
 * Copy a thread.
 */
extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */

int copy_thread(unsigned long clone_flags, unsigned long usp,
                unsigned long arg, struct task_struct *p)
{
        struct pt_regs *childregs, *kregs;
        extern void ret_from_fork(void);
        extern void ret_from_kernel_thread(void);
        void (*f)(void);
        unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

        /* Copy registers */
        sp -= sizeof(struct pt_regs);
        childregs = (struct pt_regs *) sp;
        if (unlikely(p->flags & PF_KTHREAD)) {
                struct thread_info *ti = (void *)task_stack_page(p);
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->gpr[1] = sp + sizeof(struct pt_regs);
                childregs->gpr[14] = usp;       /* function */
#ifdef CONFIG_PPC64
                clear_tsk_thread_flag(p, TIF_32BIT);
                childregs->softe = 1;
#endif
                childregs->gpr[15] = arg;
                p->thread.regs = NULL;  /* no user register state */
                ti->flags |= _TIF_RESTOREALL;
                f = ret_from_kernel_thread;
        } else {
                struct pt_regs *regs = current_pt_regs();
                CHECK_FULL_REGS(regs);
                *childregs = *regs;
                if (usp)
                        childregs->gpr[1] = usp;
                p->thread.regs = childregs;
                childregs->gpr[3] = 0;  /* Result from fork() */
                if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
                        if (!is_32bit_task())
                                childregs->gpr[13] = childregs->gpr[6];
                        else
#endif
                                childregs->gpr[2] = childregs->gpr[6];
                }

                f = ret_from_fork;
        }
        sp -= STACK_FRAME_OVERHEAD;

        /*
         * The way this works is that at some point in the future
         * some task will call _switch to switch to the new task.
         * That will pop off the stack frame created below and start
         * the new task running at ret_from_fork.  The new task will
         * do some housekeeping and then return from the fork or clone
         * system call, using the stack frame created above.
         */
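        /*
         * The resulting child stack, sketched top-down for illustration:
         *
         *      task_stack_page(p) + THREAD_SIZE
         *        childregs  (user pt_regs, zeroed for kernel threads)
         *        STACK_FRAME_OVERHEAD
         *        kregs      (pt_regs popped by _switch, set up below)
         *        STACK_FRAME_OVERHEAD   <- p->thread.ksp
         */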
        sp -= sizeof(struct pt_regs);
        kregs = (struct pt_regs *) sp;
        sp -= STACK_FRAME_OVERHEAD;
        p->thread.ksp = sp;
        p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
                                _ALIGN_UP(sizeof(struct thread_info), 16);

#ifdef CONFIG_PPC_STD_MMU_64
        if (mmu_has_feature(MMU_FTR_SLB)) {
                unsigned long sp_vsid;
                unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

                if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
                        sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
                                << SLB_VSID_SHIFT_1T;
                else
                        sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
                                << SLB_VSID_SHIFT;
                sp_vsid |= SLB_VSID_KERNEL | llp;
                p->thread.ksp_vsid = sp_vsid;
        }
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC64
        if (cpu_has_feature(CPU_FTR_DSCR)) {
                p->thread.dscr_inherit = current->thread.dscr_inherit;
                p->thread.dscr = current->thread.dscr;
        }
        if (cpu_has_feature(CPU_FTR_HAS_PPR))
                p->thread.ppr = INIT_PPR;
#endif
        /*
         * The PPC64 ABI makes use of a TOC to contain function
         * pointers.  The function (ret_from_except) is actually a pointer
         * to the TOC entry.  The first entry is a pointer to the actual
         * function.
         */
#ifdef CONFIG_PPC64
        kregs->nip = *((unsigned long *)f);
#else
        kregs->nip = (unsigned long)f;
#endif
        return 0;
}

/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
        unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
#endif

        /*
         * If we exec out of a kernel thread then thread.regs will not be
         * set.  Do it now.
         */
        if (!current->thread.regs) {
                struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
                current->thread.regs = regs - 1;
        }

        memset(regs->gpr, 0, sizeof(regs->gpr));
        regs->ctr = 0;
        regs->link = 0;
        regs->xer = 0;
        regs->ccr = 0;
        regs->gpr[1] = sp;

        /*
         * We have just cleared all the nonvolatile GPRs, so make
         * FULL_REGS(regs) return true.  This is necessary to allow
         * ptrace to examine the thread immediately after exec.
         */
        regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
        regs->mq = 0;
        regs->nip = start;
        regs->msr = MSR_USER;
#else
        if (!is_32bit_task()) {
                unsigned long entry, toc;

                /* start is a relocated pointer to the function descriptor for
                 * the elf _start routine.  The first entry in the function
                 * descriptor is the entry address of _start and the second
                 * entry is the TOC value we need to use.
                 */
                __get_user(entry, (unsigned long __user *)start);
                __get_user(toc, (unsigned long __user *)start+1);

                /* Check whether the e_entry function descriptor entries
                 * need to be relocated before we can use them.
                 */
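                /*
                 * For illustration: a 64-bit (ELFv1) function descriptor
                 * is three doublewords { entry address, TOC pointer,
                 * environment pointer }, so entry = desc[0] and
                 * toc = desc[1] above.
                 */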
                if (load_addr != 0) {
                        entry += load_addr;
                        toc   += load_addr;
                }
                regs->nip = entry;
                regs->gpr[2] = toc;
                regs->msr = MSR_USER64;
        } else {
                regs->nip = start;
                regs->gpr[2] = 0;
                regs->msr = MSR_USER32;
        }
#endif
        discard_lazy_cpu_state();
#ifdef CONFIG_VSX
        current->thread.used_vsr = 0;
#endif
        memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
        current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
        memset(current->thread.vr, 0, sizeof(current->thread.vr));
        memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
        current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
        current->thread.vrsave = 0;
        current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
        memset(current->thread.evr, 0, sizeof(current->thread.evr));
        current->thread.acc = 0;
        current->thread.spefscr = 0;
        current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        if (cpu_has_feature(CPU_FTR_TM))
                regs->msr |= MSR_TM;
        current->thread.tm_tfhar = 0;
        current->thread.tm_texasr = 0;
        current->thread.tm_tfiar = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
}

#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
                | PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
        struct pt_regs *regs = tsk->thread.regs;

        /* This is a bit hairy.  If we are an SPE enabled processor
         * (have embedded fp) we store the IEEE exception enable flags in
         * fpexc_mode.  fpexc_mode is also used for setting FP exception
         * mode (async, precise, disabled) for 'Classic' FP. */
        if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
                if (cpu_has_feature(CPU_FTR_SPE)) {
                        tsk->thread.fpexc_mode = val &
                                (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
                        return 0;
                } else {
                        return -EINVAL;
                }
#else
                return -EINVAL;
#endif
        }

        /* On a CONFIG_SPE implementation this does not hurt us.  The bits
         * that __pack_fe01 uses do not overlap with bits used for
         * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
         * on CONFIG_SPE implementations are reserved so writing to
         * them does not change anything */
        if (val > PR_FP_EXC_PRECISE)
                return -EINVAL;
        tsk->thread.fpexc_mode = __pack_fe01(val);
        if (regs != NULL && (regs->msr & MSR_FP) != 0)
                regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
                        | tsk->thread.fpexc_mode;
        return 0;
}
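
/*
 * For reference, summarizing architected behaviour: MSR[FE0,FE1] select
 * the classic FP exception mode -- 00 ignore exceptions, 01 imprecise
 * nonrecoverable, 10 imprecise recoverable, 11 precise -- and
 * __pack_fe01() packs those two bits so they can be OR'd into the MSR
 * as above.
 */
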
int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
        unsigned int val;

        if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
                if (cpu_has_feature(CPU_FTR_SPE))
                        val = tsk->thread.fpexc_mode;
                else
                        return -EINVAL;
#else
                return -EINVAL;
#endif
        else
                val = __unpack_fe01(tsk->thread.fpexc_mode);
        return put_user(val, (unsigned int __user *) adr);
}

int set_endian(struct task_struct *tsk, unsigned int val)
{
        struct pt_regs *regs = tsk->thread.regs;

        if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
            (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
                return -EINVAL;

        if (regs == NULL)
                return -EINVAL;

        if (val == PR_ENDIAN_BIG)
                regs->msr &= ~MSR_LE;
        else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
                regs->msr |= MSR_LE;
        else
                return -EINVAL;

        return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
        struct pt_regs *regs = tsk->thread.regs;
        unsigned int val;

        if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
            !cpu_has_feature(CPU_FTR_REAL_LE))
                return -EINVAL;

        if (regs == NULL)
                return -EINVAL;

        if (regs->msr & MSR_LE) {
                if (cpu_has_feature(CPU_FTR_REAL_LE))
                        val = PR_ENDIAN_LITTLE;
                else
                        val = PR_ENDIAN_PPC_LITTLE;
        } else
                val = PR_ENDIAN_BIG;

        return put_user(val, (unsigned int __user *)adr);
}
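
/*
 * Userspace reaches set_endian()/get_endian() via prctl(), e.g.
 * prctl(PR_SET_ENDIAN, PR_ENDIAN_LITTLE) flips MSR_LE for the caller on
 * CPUs with CPU_FTR_REAL_LE (illustrative usage; the prctl plumbing
 * lives in kernel/sys.c).
 */
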
int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
        tsk->thread.align_ctl = val;
        return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
        return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}

static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
                                  unsigned long nbytes)
{
        unsigned long stack_page;
        unsigned long cpu = task_cpu(p);

        /*
         * Avoid crashing if the stack has overflowed and corrupted
         * task_cpu(p), which is in the thread_info struct.
         */
        if (cpu < NR_CPUS && cpu_possible(cpu)) {
                stack_page = (unsigned long) hardirq_ctx[cpu];
                if (sp >= stack_page + sizeof(struct thread_struct)
                    && sp <= stack_page + THREAD_SIZE - nbytes)
                        return 1;

                stack_page = (unsigned long) softirq_ctx[cpu];
                if (sp >= stack_page + sizeof(struct thread_struct)
                    && sp <= stack_page + THREAD_SIZE - nbytes)
                        return 1;
        }
        return 0;
}

int validate_sp(unsigned long sp, struct task_struct *p,
                unsigned long nbytes)
{
        unsigned long stack_page = (unsigned long)task_stack_page(p);

        if (sp >= stack_page + sizeof(struct thread_struct)
            && sp <= stack_page + THREAD_SIZE - nbytes)
                return 1;

        return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);

unsigned long get_wchan(struct task_struct *p)
{
        unsigned long ip, sp;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        sp = p->thread.ksp;
        if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
                return 0;

        do {
                sp = *(unsigned long *)sp;
                if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
                        return 0;
                if (count > 0) {
                        ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
                        if (!in_sched_functions(ip))
                                return ip;
                }
        } while (count++ < 16);
        return 0;
}

static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
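
/*
 * Both get_wchan() above and show_stack() below walk the PowerPC ABI
 * backchain: word 0 of every frame points at the caller's frame, and
 * the saved LR sits STACK_FRAME_LR_SAVE words in.  This is a summary of
 * the ABI, not behaviour added here.
 */
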
void show_stack(struct task_struct *tsk, unsigned long *stack)
{
        unsigned long sp, ip, lr, newsp;
        int count = 0;
        int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        int curr_frame = current->curr_ret_stack;
        extern void return_to_handler(void);
        unsigned long rth = (unsigned long)return_to_handler;
        unsigned long mrth = -1;
#ifdef CONFIG_PPC64
        extern void mod_return_to_handler(void);
        rth = *(unsigned long *)rth;
        mrth = (unsigned long)mod_return_to_handler;
        mrth = *(unsigned long *)mrth;
#endif
#endif

        sp = (unsigned long) stack;
        if (tsk == NULL)
                tsk = current;
        if (sp == 0) {
                if (tsk == current)
                        asm("mr %0,1" : "=r" (sp));
                else
                        sp = tsk->thread.ksp;
        }

        lr = 0;
        printk("Call Trace:\n");
        do {
                if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
                        return;

                stack = (unsigned long *) sp;
                newsp = stack[0];
                ip = stack[STACK_FRAME_LR_SAVE];
                if (!firstframe || ip != lr) {
                        printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
                        if ((ip == rth || ip == mrth) && curr_frame >= 0) {
                                printk(" (%pS)",
                                       (void *)current->ret_stack[curr_frame].ret);
                                curr_frame--;
                        }
#endif
                        if (firstframe)
                                printk(" (unreliable)");
                        printk("\n");
                }
                firstframe = 0;

                /*
                 * See if this is an exception frame.
                 * We look for the "regshere" marker in the current frame.
                 */
                if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
                    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
                        struct pt_regs *regs = (struct pt_regs *)
                                (sp + STACK_FRAME_OVERHEAD);
                        lr = regs->link;
                        printk("--- Exception: %lx at %pS\n    LR = %pS\n",
                               regs->trap, (void *)regs->nip, (void *)lr);
                        firstframe = 1;
                }

                sp = newsp;
        } while (count++ < kstack_depth_to_print);
}

void dump_stack(void)
{
        show_stack(current, NULL);
}
EXPORT_SYMBOL(dump_stack);

#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void __ppc64_runlatch_on(void)
{
        struct thread_info *ti = current_thread_info();
        unsigned long ctrl;

        ctrl = mfspr(SPRN_CTRLF);
        ctrl |= CTRL_RUNLATCH;
        mtspr(SPRN_CTRLT, ctrl);

        ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
void __ppc64_runlatch_off(void)
{
        struct thread_info *ti = current_thread_info();
        unsigned long ctrl;

        ti->local_flags &= ~_TLF_RUNLATCH;

        ctrl = mfspr(SPRN_CTRLF);
        ctrl &= ~CTRL_RUNLATCH;
        mtspr(SPRN_CTRLT, ctrl);
}
#endif /* CONFIG_PPC64 */

unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() & ~PAGE_MASK;
        return sp & ~0xf;
}

static inline unsigned long brk_rnd(void)
{
        unsigned long rnd = 0;

        /* 8MB for 32bit, 1GB for 64bit */
        if (is_32bit_task())
                rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
        else
                rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));

        return rnd << PAGE_SHIFT;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long base = mm->brk;
        unsigned long ret;

#ifdef CONFIG_PPC_STD_MMU_64
        /*
         * If we are using 1TB segments and we are allowed to randomise
         * the heap, we can put it above 1TB so it is backed by a 1TB
         * segment.  Otherwise the heap will be in the bottom 1TB
         * which always uses 256MB segments and this may result in a
         * performance penalty.
         */
        if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
                base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

        ret = PAGE_ALIGN(base + brk_rnd());

        if (ret < mm->brk)
                return mm->brk;

        return ret;
}

unsigned long randomize_et_dyn(unsigned long base)
{
        unsigned long ret = PAGE_ALIGN(base + brk_rnd());

        if (ret < base)
                return base;

        return ret;
}