1 /* 2 * Derived from "arch/i386/kernel/process.c" 3 * Copyright (C) 1995 Linus Torvalds 4 * 5 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and 6 * Paul Mackerras (paulus@cs.anu.edu.au) 7 * 8 * PowerPC version 9 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 10 * 11 * This program is free software; you can redistribute it and/or 12 * modify it under the terms of the GNU General Public License 13 * as published by the Free Software Foundation; either version 14 * 2 of the License, or (at your option) any later version. 15 */ 16 17 #include <linux/errno.h> 18 #include <linux/sched.h> 19 #include <linux/sched/debug.h> 20 #include <linux/sched/task.h> 21 #include <linux/sched/task_stack.h> 22 #include <linux/kernel.h> 23 #include <linux/mm.h> 24 #include <linux/smp.h> 25 #include <linux/stddef.h> 26 #include <linux/unistd.h> 27 #include <linux/ptrace.h> 28 #include <linux/slab.h> 29 #include <linux/user.h> 30 #include <linux/elf.h> 31 #include <linux/prctl.h> 32 #include <linux/init_task.h> 33 #include <linux/export.h> 34 #include <linux/kallsyms.h> 35 #include <linux/mqueue.h> 36 #include <linux/hardirq.h> 37 #include <linux/utsname.h> 38 #include <linux/ftrace.h> 39 #include <linux/kernel_stat.h> 40 #include <linux/personality.h> 41 #include <linux/random.h> 42 #include <linux/hw_breakpoint.h> 43 #include <linux/uaccess.h> 44 #include <linux/elf-randomize.h> 45 #include <linux/pkeys.h> 46 47 #include <asm/pgtable.h> 48 #include <asm/io.h> 49 #include <asm/processor.h> 50 #include <asm/mmu.h> 51 #include <asm/prom.h> 52 #include <asm/machdep.h> 53 #include <asm/time.h> 54 #include <asm/runlatch.h> 55 #include <asm/syscalls.h> 56 #include <asm/switch_to.h> 57 #include <asm/tm.h> 58 #include <asm/debug.h> 59 #ifdef CONFIG_PPC64 60 #include <asm/firmware.h> 61 #include <asm/hw_irq.h> 62 #endif 63 #include <asm/code-patching.h> 64 #include <asm/exec.h> 65 #include <asm/livepatch.h> 66 #include <asm/cpu_has_feature.h> 67 #include <asm/asm-prototypes.h> 68 69 #include <linux/kprobes.h> 70 #include <linux/kdebug.h> 71 72 /* Transactional Memory debug */ 73 #ifdef TM_DEBUG_SW 74 #define TM_DEBUG(x...) printk(KERN_INFO x) 75 #else 76 #define TM_DEBUG(x...) do { } while(0) 77 #endif 78 79 extern unsigned long _get_SP(void); 80 81 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 82 /* 83 * Are we running in "Suspend disabled" mode? If so we have to block any 84 * sigreturn that would get us into suspended state, and we also warn in some 85 * other paths that we should never reach with suspend disabled. 86 */ 87 bool tm_suspend_disabled __ro_after_init = false; 88 89 static void check_if_tm_restore_required(struct task_struct *tsk) 90 { 91 /* 92 * If we are saving the current thread's registers, and the 93 * thread is in a transactional state, set the TIF_RESTORE_TM 94 * bit so that we know to restore the registers before 95 * returning to userspace. 
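	 * The ckpt_regs.msr value snapshotted here is what restore_tm_state()
	 * later compares against regs->msr to work out which of FP/VEC/VSX
	 * have to be reloaded on the way back out to userspace.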
96 */ 97 if (tsk == current && tsk->thread.regs && 98 MSR_TM_ACTIVE(tsk->thread.regs->msr) && 99 !test_thread_flag(TIF_RESTORE_TM)) { 100 tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr; 101 set_thread_flag(TIF_RESTORE_TM); 102 } 103 } 104 105 static inline bool msr_tm_active(unsigned long msr) 106 { 107 return MSR_TM_ACTIVE(msr); 108 } 109 110 static bool tm_active_with_fp(struct task_struct *tsk) 111 { 112 return msr_tm_active(tsk->thread.regs->msr) && 113 (tsk->thread.ckpt_regs.msr & MSR_FP); 114 } 115 116 static bool tm_active_with_altivec(struct task_struct *tsk) 117 { 118 return msr_tm_active(tsk->thread.regs->msr) && 119 (tsk->thread.ckpt_regs.msr & MSR_VEC); 120 } 121 #else 122 static inline bool msr_tm_active(unsigned long msr) { return false; } 123 static inline void check_if_tm_restore_required(struct task_struct *tsk) { } 124 static inline bool tm_active_with_fp(struct task_struct *tsk) { return false; } 125 static inline bool tm_active_with_altivec(struct task_struct *tsk) { return false; } 126 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 127 128 bool strict_msr_control; 129 EXPORT_SYMBOL(strict_msr_control); 130 131 static int __init enable_strict_msr_control(char *str) 132 { 133 strict_msr_control = true; 134 pr_info("Enabling strict facility control\n"); 135 136 return 0; 137 } 138 early_param("ppc_strict_facility_enable", enable_strict_msr_control); 139 140 unsigned long msr_check_and_set(unsigned long bits) 141 { 142 unsigned long oldmsr = mfmsr(); 143 unsigned long newmsr; 144 145 newmsr = oldmsr | bits; 146 147 #ifdef CONFIG_VSX 148 if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP)) 149 newmsr |= MSR_VSX; 150 #endif 151 152 if (oldmsr != newmsr) 153 mtmsr_isync(newmsr); 154 155 return newmsr; 156 } 157 EXPORT_SYMBOL_GPL(msr_check_and_set); 158 159 void __msr_check_and_clear(unsigned long bits) 160 { 161 unsigned long oldmsr = mfmsr(); 162 unsigned long newmsr; 163 164 newmsr = oldmsr & ~bits; 165 166 #ifdef CONFIG_VSX 167 if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP)) 168 newmsr &= ~MSR_VSX; 169 #endif 170 171 if (oldmsr != newmsr) 172 mtmsr_isync(newmsr); 173 } 174 EXPORT_SYMBOL(__msr_check_and_clear); 175 176 #ifdef CONFIG_PPC_FPU 177 static void __giveup_fpu(struct task_struct *tsk) 178 { 179 unsigned long msr; 180 181 save_fpu(tsk); 182 msr = tsk->thread.regs->msr; 183 msr &= ~MSR_FP; 184 #ifdef CONFIG_VSX 185 if (cpu_has_feature(CPU_FTR_VSX)) 186 msr &= ~MSR_VSX; 187 #endif 188 tsk->thread.regs->msr = msr; 189 } 190 191 void giveup_fpu(struct task_struct *tsk) 192 { 193 check_if_tm_restore_required(tsk); 194 195 msr_check_and_set(MSR_FP); 196 __giveup_fpu(tsk); 197 msr_check_and_clear(MSR_FP); 198 } 199 EXPORT_SYMBOL(giveup_fpu); 200 201 /* 202 * Make sure the floating-point register state in the 203 * the thread_struct is up to date for task tsk. 204 */ 205 void flush_fp_to_thread(struct task_struct *tsk) 206 { 207 if (tsk->thread.regs) { 208 /* 209 * We need to disable preemption here because if we didn't, 210 * another process could get scheduled after the regs->msr 211 * test but before we have finished saving the FP registers 212 * to the thread_struct. That process could take over the 213 * FPU, and then when we get scheduled again we would store 214 * bogus values for the remaining FP registers. 215 */ 216 preempt_disable(); 217 if (tsk->thread.regs->msr & MSR_FP) { 218 /* 219 * This should only ever be called for current or 220 * for a stopped child process. 
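			 * (A stopped child here typically means a ptrace
			 * tracee whose FP state a debugger wants to read
			 * out of the thread_struct.)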
Since we save away 221 * the FP register state on context switch, 222 * there is something wrong if a stopped child appears 223 * to still have its FP state in the CPU registers. 224 */ 225 BUG_ON(tsk != current); 226 giveup_fpu(tsk); 227 } 228 preempt_enable(); 229 } 230 } 231 EXPORT_SYMBOL_GPL(flush_fp_to_thread); 232 233 void enable_kernel_fp(void) 234 { 235 unsigned long cpumsr; 236 237 WARN_ON(preemptible()); 238 239 cpumsr = msr_check_and_set(MSR_FP); 240 241 if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) { 242 check_if_tm_restore_required(current); 243 /* 244 * If a thread has already been reclaimed then the 245 * checkpointed registers are on the CPU but have definitely 246 * been saved by the reclaim code. Don't need to and *cannot* 247 * giveup as this would save to the 'live' structure not the 248 * checkpointed structure. 249 */ 250 if(!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr)) 251 return; 252 __giveup_fpu(current); 253 } 254 } 255 EXPORT_SYMBOL(enable_kernel_fp); 256 257 static int restore_fp(struct task_struct *tsk) 258 { 259 if (tsk->thread.load_fp || tm_active_with_fp(tsk)) { 260 load_fp_state(¤t->thread.fp_state); 261 current->thread.load_fp++; 262 return 1; 263 } 264 return 0; 265 } 266 #else 267 static int restore_fp(struct task_struct *tsk) { return 0; } 268 #endif /* CONFIG_PPC_FPU */ 269 270 #ifdef CONFIG_ALTIVEC 271 #define loadvec(thr) ((thr).load_vec) 272 273 static void __giveup_altivec(struct task_struct *tsk) 274 { 275 unsigned long msr; 276 277 save_altivec(tsk); 278 msr = tsk->thread.regs->msr; 279 msr &= ~MSR_VEC; 280 #ifdef CONFIG_VSX 281 if (cpu_has_feature(CPU_FTR_VSX)) 282 msr &= ~MSR_VSX; 283 #endif 284 tsk->thread.regs->msr = msr; 285 } 286 287 void giveup_altivec(struct task_struct *tsk) 288 { 289 check_if_tm_restore_required(tsk); 290 291 msr_check_and_set(MSR_VEC); 292 __giveup_altivec(tsk); 293 msr_check_and_clear(MSR_VEC); 294 } 295 EXPORT_SYMBOL(giveup_altivec); 296 297 void enable_kernel_altivec(void) 298 { 299 unsigned long cpumsr; 300 301 WARN_ON(preemptible()); 302 303 cpumsr = msr_check_and_set(MSR_VEC); 304 305 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) { 306 check_if_tm_restore_required(current); 307 /* 308 * If a thread has already been reclaimed then the 309 * checkpointed registers are on the CPU but have definitely 310 * been saved by the reclaim code. Don't need to and *cannot* 311 * giveup as this would save to the 'live' structure not the 312 * checkpointed structure. 313 */ 314 if(!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr)) 315 return; 316 __giveup_altivec(current); 317 } 318 } 319 EXPORT_SYMBOL(enable_kernel_altivec); 320 321 /* 322 * Make sure the VMX/Altivec register state in the 323 * the thread_struct is up to date for task tsk. 
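 * Callers such as the ptrace regset code and the signal frame setup use
 * this before copying vr_state out of (or into) the thread_struct.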
324 */ 325 void flush_altivec_to_thread(struct task_struct *tsk) 326 { 327 if (tsk->thread.regs) { 328 preempt_disable(); 329 if (tsk->thread.regs->msr & MSR_VEC) { 330 BUG_ON(tsk != current); 331 giveup_altivec(tsk); 332 } 333 preempt_enable(); 334 } 335 } 336 EXPORT_SYMBOL_GPL(flush_altivec_to_thread); 337 338 static int restore_altivec(struct task_struct *tsk) 339 { 340 if (cpu_has_feature(CPU_FTR_ALTIVEC) && 341 (tsk->thread.load_vec || tm_active_with_altivec(tsk))) { 342 load_vr_state(&tsk->thread.vr_state); 343 tsk->thread.used_vr = 1; 344 tsk->thread.load_vec++; 345 346 return 1; 347 } 348 return 0; 349 } 350 #else 351 #define loadvec(thr) 0 352 static inline int restore_altivec(struct task_struct *tsk) { return 0; } 353 #endif /* CONFIG_ALTIVEC */ 354 355 #ifdef CONFIG_VSX 356 static void __giveup_vsx(struct task_struct *tsk) 357 { 358 unsigned long msr = tsk->thread.regs->msr; 359 360 /* 361 * We should never be ssetting MSR_VSX without also setting 362 * MSR_FP and MSR_VEC 363 */ 364 WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC))); 365 366 /* __giveup_fpu will clear MSR_VSX */ 367 if (msr & MSR_FP) 368 __giveup_fpu(tsk); 369 if (msr & MSR_VEC) 370 __giveup_altivec(tsk); 371 } 372 373 static void giveup_vsx(struct task_struct *tsk) 374 { 375 check_if_tm_restore_required(tsk); 376 377 msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX); 378 __giveup_vsx(tsk); 379 msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX); 380 } 381 382 void enable_kernel_vsx(void) 383 { 384 unsigned long cpumsr; 385 386 WARN_ON(preemptible()); 387 388 cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX); 389 390 if (current->thread.regs && 391 (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) { 392 check_if_tm_restore_required(current); 393 /* 394 * If a thread has already been reclaimed then the 395 * checkpointed registers are on the CPU but have definitely 396 * been saved by the reclaim code. Don't need to and *cannot* 397 * giveup as this would save to the 'live' structure not the 398 * checkpointed structure. 
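		 * That case is what the check below catches: the MSR we just
		 * read (cpumsr) shows no transaction active while the task's
		 * regs->msr still does, i.e. a reclaim has already happened.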
399 */ 400 if(!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr)) 401 return; 402 __giveup_vsx(current); 403 } 404 } 405 EXPORT_SYMBOL(enable_kernel_vsx); 406 407 void flush_vsx_to_thread(struct task_struct *tsk) 408 { 409 if (tsk->thread.regs) { 410 preempt_disable(); 411 if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) { 412 BUG_ON(tsk != current); 413 giveup_vsx(tsk); 414 } 415 preempt_enable(); 416 } 417 } 418 EXPORT_SYMBOL_GPL(flush_vsx_to_thread); 419 420 static int restore_vsx(struct task_struct *tsk) 421 { 422 if (cpu_has_feature(CPU_FTR_VSX)) { 423 tsk->thread.used_vsr = 1; 424 return 1; 425 } 426 427 return 0; 428 } 429 #else 430 static inline int restore_vsx(struct task_struct *tsk) { return 0; } 431 #endif /* CONFIG_VSX */ 432 433 #ifdef CONFIG_SPE 434 void giveup_spe(struct task_struct *tsk) 435 { 436 check_if_tm_restore_required(tsk); 437 438 msr_check_and_set(MSR_SPE); 439 __giveup_spe(tsk); 440 msr_check_and_clear(MSR_SPE); 441 } 442 EXPORT_SYMBOL(giveup_spe); 443 444 void enable_kernel_spe(void) 445 { 446 WARN_ON(preemptible()); 447 448 msr_check_and_set(MSR_SPE); 449 450 if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) { 451 check_if_tm_restore_required(current); 452 __giveup_spe(current); 453 } 454 } 455 EXPORT_SYMBOL(enable_kernel_spe); 456 457 void flush_spe_to_thread(struct task_struct *tsk) 458 { 459 if (tsk->thread.regs) { 460 preempt_disable(); 461 if (tsk->thread.regs->msr & MSR_SPE) { 462 BUG_ON(tsk != current); 463 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR); 464 giveup_spe(tsk); 465 } 466 preempt_enable(); 467 } 468 } 469 #endif /* CONFIG_SPE */ 470 471 static unsigned long msr_all_available; 472 473 static int __init init_msr_all_available(void) 474 { 475 #ifdef CONFIG_PPC_FPU 476 msr_all_available |= MSR_FP; 477 #endif 478 #ifdef CONFIG_ALTIVEC 479 if (cpu_has_feature(CPU_FTR_ALTIVEC)) 480 msr_all_available |= MSR_VEC; 481 #endif 482 #ifdef CONFIG_VSX 483 if (cpu_has_feature(CPU_FTR_VSX)) 484 msr_all_available |= MSR_VSX; 485 #endif 486 #ifdef CONFIG_SPE 487 if (cpu_has_feature(CPU_FTR_SPE)) 488 msr_all_available |= MSR_SPE; 489 #endif 490 491 return 0; 492 } 493 early_initcall(init_msr_all_available); 494 495 void giveup_all(struct task_struct *tsk) 496 { 497 unsigned long usermsr; 498 499 if (!tsk->thread.regs) 500 return; 501 502 usermsr = tsk->thread.regs->msr; 503 504 if ((usermsr & msr_all_available) == 0) 505 return; 506 507 msr_check_and_set(msr_all_available); 508 check_if_tm_restore_required(tsk); 509 510 WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC))); 511 512 #ifdef CONFIG_PPC_FPU 513 if (usermsr & MSR_FP) 514 __giveup_fpu(tsk); 515 #endif 516 #ifdef CONFIG_ALTIVEC 517 if (usermsr & MSR_VEC) 518 __giveup_altivec(tsk); 519 #endif 520 #ifdef CONFIG_SPE 521 if (usermsr & MSR_SPE) 522 __giveup_spe(tsk); 523 #endif 524 525 msr_check_and_clear(msr_all_available); 526 } 527 EXPORT_SYMBOL(giveup_all); 528 529 void restore_math(struct pt_regs *regs) 530 { 531 unsigned long msr; 532 533 if (!msr_tm_active(regs->msr) && 534 !current->thread.load_fp && !loadvec(current->thread)) 535 return; 536 537 msr = regs->msr; 538 msr_check_and_set(msr_all_available); 539 540 /* 541 * Only reload if the bit is not set in the user MSR, the bit BEING set 542 * indicates that the registers are hot 543 */ 544 if ((!(msr & MSR_FP)) && restore_fp(current)) 545 msr |= MSR_FP | current->thread.fpexc_mode; 546 547 if ((!(msr & MSR_VEC)) && restore_altivec(current)) 548 msr |= MSR_VEC; 549 550 if ((msr & (MSR_FP | 
MSR_VEC)) == (MSR_FP | MSR_VEC) && 551 restore_vsx(current)) { 552 msr |= MSR_VSX; 553 } 554 555 msr_check_and_clear(msr_all_available); 556 557 regs->msr = msr; 558 } 559 560 static void save_all(struct task_struct *tsk) 561 { 562 unsigned long usermsr; 563 564 if (!tsk->thread.regs) 565 return; 566 567 usermsr = tsk->thread.regs->msr; 568 569 if ((usermsr & msr_all_available) == 0) 570 return; 571 572 msr_check_and_set(msr_all_available); 573 574 WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC))); 575 576 if (usermsr & MSR_FP) 577 save_fpu(tsk); 578 579 if (usermsr & MSR_VEC) 580 save_altivec(tsk); 581 582 if (usermsr & MSR_SPE) 583 __giveup_spe(tsk); 584 585 msr_check_and_clear(msr_all_available); 586 thread_pkey_regs_save(&tsk->thread); 587 } 588 589 void flush_all_to_thread(struct task_struct *tsk) 590 { 591 if (tsk->thread.regs) { 592 preempt_disable(); 593 BUG_ON(tsk != current); 594 save_all(tsk); 595 596 #ifdef CONFIG_SPE 597 if (tsk->thread.regs->msr & MSR_SPE) 598 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR); 599 #endif 600 601 preempt_enable(); 602 } 603 } 604 EXPORT_SYMBOL(flush_all_to_thread); 605 606 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 607 void do_send_trap(struct pt_regs *regs, unsigned long address, 608 unsigned long error_code, int breakpt) 609 { 610 current->thread.trap_nr = TRAP_HWBKPT; 611 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, 612 11, SIGSEGV) == NOTIFY_STOP) 613 return; 614 615 /* Deliver the signal to userspace */ 616 force_sig_ptrace_errno_trap(breakpt, /* breakpoint or watchpoint id */ 617 (void __user *)address); 618 } 619 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ 620 void do_break (struct pt_regs *regs, unsigned long address, 621 unsigned long error_code) 622 { 623 siginfo_t info; 624 625 current->thread.trap_nr = TRAP_HWBKPT; 626 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, 627 11, SIGSEGV) == NOTIFY_STOP) 628 return; 629 630 if (debugger_break_match(regs)) 631 return; 632 633 /* Clear the breakpoint */ 634 hw_breakpoint_disable(); 635 636 /* Deliver the signal to userspace */ 637 clear_siginfo(&info); 638 info.si_signo = SIGTRAP; 639 info.si_errno = 0; 640 info.si_code = TRAP_HWBKPT; 641 info.si_addr = (void __user *)address; 642 force_sig_info(SIGTRAP, &info, current); 643 } 644 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ 645 646 static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk); 647 648 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 649 /* 650 * Set the debug registers back to their default "safe" values. 651 */ 652 static void set_debug_reg_defaults(struct thread_struct *thread) 653 { 654 thread->debug.iac1 = thread->debug.iac2 = 0; 655 #if CONFIG_PPC_ADV_DEBUG_IACS > 2 656 thread->debug.iac3 = thread->debug.iac4 = 0; 657 #endif 658 thread->debug.dac1 = thread->debug.dac2 = 0; 659 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 660 thread->debug.dvc1 = thread->debug.dvc2 = 0; 661 #endif 662 thread->debug.dbcr0 = 0; 663 #ifdef CONFIG_BOOKE 664 /* 665 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1) 666 */ 667 thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | 668 DBCR1_IAC3US | DBCR1_IAC4US; 669 /* 670 * Force Data Address Compare User/Supervisor bits to be User-only 671 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0. 
672 */ 673 thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US; 674 #else 675 thread->debug.dbcr1 = 0; 676 #endif 677 } 678 679 static void prime_debug_regs(struct debug_reg *debug) 680 { 681 /* 682 * We could have inherited MSR_DE from userspace, since 683 * it doesn't get cleared on exception entry. Make sure 684 * MSR_DE is clear before we enable any debug events. 685 */ 686 mtmsr(mfmsr() & ~MSR_DE); 687 688 mtspr(SPRN_IAC1, debug->iac1); 689 mtspr(SPRN_IAC2, debug->iac2); 690 #if CONFIG_PPC_ADV_DEBUG_IACS > 2 691 mtspr(SPRN_IAC3, debug->iac3); 692 mtspr(SPRN_IAC4, debug->iac4); 693 #endif 694 mtspr(SPRN_DAC1, debug->dac1); 695 mtspr(SPRN_DAC2, debug->dac2); 696 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 697 mtspr(SPRN_DVC1, debug->dvc1); 698 mtspr(SPRN_DVC2, debug->dvc2); 699 #endif 700 mtspr(SPRN_DBCR0, debug->dbcr0); 701 mtspr(SPRN_DBCR1, debug->dbcr1); 702 #ifdef CONFIG_BOOKE 703 mtspr(SPRN_DBCR2, debug->dbcr2); 704 #endif 705 } 706 /* 707 * Unless neither the old or new thread are making use of the 708 * debug registers, set the debug registers from the values 709 * stored in the new thread. 710 */ 711 void switch_booke_debug_regs(struct debug_reg *new_debug) 712 { 713 if ((current->thread.debug.dbcr0 & DBCR0_IDM) 714 || (new_debug->dbcr0 & DBCR0_IDM)) 715 prime_debug_regs(new_debug); 716 } 717 EXPORT_SYMBOL_GPL(switch_booke_debug_regs); 718 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ 719 #ifndef CONFIG_HAVE_HW_BREAKPOINT 720 static void set_breakpoint(struct arch_hw_breakpoint *brk) 721 { 722 preempt_disable(); 723 __set_breakpoint(brk); 724 preempt_enable(); 725 } 726 727 static void set_debug_reg_defaults(struct thread_struct *thread) 728 { 729 thread->hw_brk.address = 0; 730 thread->hw_brk.type = 0; 731 if (ppc_breakpoint_available()) 732 set_breakpoint(&thread->hw_brk); 733 } 734 #endif /* !CONFIG_HAVE_HW_BREAKPOINT */ 735 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ 736 737 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 738 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) 739 { 740 mtspr(SPRN_DAC1, dabr); 741 #ifdef CONFIG_PPC_47x 742 isync(); 743 #endif 744 return 0; 745 } 746 #elif defined(CONFIG_PPC_BOOK3S) 747 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) 748 { 749 mtspr(SPRN_DABR, dabr); 750 if (cpu_has_feature(CPU_FTR_DABRX)) 751 mtspr(SPRN_DABRX, dabrx); 752 return 0; 753 } 754 #elif defined(CONFIG_PPC_8xx) 755 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) 756 { 757 unsigned long addr = dabr & ~HW_BRK_TYPE_DABR; 758 unsigned long lctrl1 = 0x90000000; /* compare type: equal on E & F */ 759 unsigned long lctrl2 = 0x8e000002; /* watchpoint 1 on cmp E | F */ 760 761 if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ) 762 lctrl1 |= 0xa0000; 763 else if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE) 764 lctrl1 |= 0xf0000; 765 else if ((dabr & HW_BRK_TYPE_RDWR) == 0) 766 lctrl2 = 0; 767 768 mtspr(SPRN_LCTRL2, 0); 769 mtspr(SPRN_CMPE, addr); 770 mtspr(SPRN_CMPF, addr + 4); 771 mtspr(SPRN_LCTRL1, lctrl1); 772 mtspr(SPRN_LCTRL2, lctrl2); 773 774 return 0; 775 } 776 #else 777 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) 778 { 779 return -EINVAL; 780 } 781 #endif 782 783 static inline int set_dabr(struct arch_hw_breakpoint *brk) 784 { 785 unsigned long dabr, dabrx; 786 787 dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR); 788 dabrx = ((brk->type >> 3) & 0x7); 789 790 if (ppc_md.set_dabr) 791 return ppc_md.set_dabr(dabr, dabrx); 792 793 return __set_dabr(dabr, dabrx); 794 } 795 796 static inline int set_dawr(struct 
arch_hw_breakpoint *brk)
{
	unsigned long dawr, dawrx, mrd;

	dawr = brk->address;

	dawrx  = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)) \
		<< (63 - 58); /* read/write bits */
	dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2) \
		<< (63 - 59); /* translate */
	dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) \
		>> 3; /* privilege bits */

	/*
	 * The DAWR length is stored in the MRD field, bits 48:53. It matches
	 * a range in doublewords (64 bits), biased by -1, e.g. 0b000000 = 1DW
	 * and 0b111111 = 64DW.  brk->len is in bytes.  This aligns up to
	 * double word size, shifts and applies the bias.
	 */
	mrd = ((brk->len + 7) >> 3) - 1;
	dawrx |= (mrd & 0x3f) << (63 - 53);

	if (ppc_md.set_dawr)
		return ppc_md.set_dawr(dawr, dawrx);
	mtspr(SPRN_DAWR, dawr);
	mtspr(SPRN_DAWRX, dawrx);
	return 0;
}

void __set_breakpoint(struct arch_hw_breakpoint *brk)
{
	memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));

	if (cpu_has_feature(CPU_FTR_DAWR))
		// POWER8 or later
		set_dawr(brk);
	else if (!cpu_has_feature(CPU_FTR_ARCH_207S))
		// POWER7 or earlier
		set_dabr(brk);
	else
		// Shouldn't happen due to higher level checks
		WARN_ON_ONCE(1);
}

/* Check if we have DAWR or DABR hardware */
bool ppc_breakpoint_available(void)
{
	if (cpu_has_feature(CPU_FTR_DAWR))
		return true; /* POWER8 DAWR */
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		return false; /* POWER9 with DAWR disabled */
	/* DABR: Everything but POWER8 and POWER9 */
	return true;
}
EXPORT_SYMBOL_GPL(ppc_breakpoint_available);

static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
				struct arch_hw_breakpoint *b)
{
	if (a->address != b->address)
		return false;
	if (a->type != b->type)
		return false;
	if (a->len != b->len)
		return false;
	return true;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM

static inline bool tm_enabled(struct task_struct *tsk)
{
	return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
}

static void tm_reclaim_thread(struct thread_struct *thr, uint8_t cause)
{
	/*
	 * Use the current MSR TM suspended bit to track if we have
	 * checkpointed state outstanding.
	 * On signal delivery, we'd normally reclaim the checkpointed
	 * state to obtain the stack pointer (see: get_tm_stackpointer()).
	 * This will then directly return to userspace without going
	 * through __switch_to(). However, if the stack frame is bad,
	 * we need to exit this thread, which calls __switch_to(), which
	 * will again attempt to reclaim the already saved tm state.
	 * Hence we need to check that we've not already reclaimed
	 * this state.
	 * We do this using the current MSR, rather than tracking it in
	 * some specific thread_struct bit, as it has the additional
	 * benefit of checking for a potential TM bad thing exception.
	 */
	if (!MSR_TM_SUSPENDED(mfmsr()))
		return;

	giveup_all(container_of(thr, struct task_struct, thread));

	tm_reclaim(thr, cause);

	/*
	 * If we are in a transaction and FP is off then we can't have
	 * used FP inside that transaction. Hence the checkpointed
	 * state is the same as the live state. We need to copy the
	 * live state to the checkpointed state so that when the
	 * transaction is restored, the checkpointed state is correct
	 * and the aborted transaction sees the correct state.
We use 901 * ckpt_regs.msr here as that's what tm_reclaim will use to 902 * determine if it's going to write the checkpointed state or 903 * not. So either this will write the checkpointed registers, 904 * or reclaim will. Similarly for VMX. 905 */ 906 if ((thr->ckpt_regs.msr & MSR_FP) == 0) 907 memcpy(&thr->ckfp_state, &thr->fp_state, 908 sizeof(struct thread_fp_state)); 909 if ((thr->ckpt_regs.msr & MSR_VEC) == 0) 910 memcpy(&thr->ckvr_state, &thr->vr_state, 911 sizeof(struct thread_vr_state)); 912 } 913 914 void tm_reclaim_current(uint8_t cause) 915 { 916 tm_enable(); 917 tm_reclaim_thread(¤t->thread, cause); 918 } 919 920 static inline void tm_reclaim_task(struct task_struct *tsk) 921 { 922 /* We have to work out if we're switching from/to a task that's in the 923 * middle of a transaction. 924 * 925 * In switching we need to maintain a 2nd register state as 926 * oldtask->thread.ckpt_regs. We tm_reclaim(oldproc); this saves the 927 * checkpointed (tbegin) state in ckpt_regs, ckfp_state and 928 * ckvr_state 929 * 930 * We also context switch (save) TFHAR/TEXASR/TFIAR in here. 931 */ 932 struct thread_struct *thr = &tsk->thread; 933 934 if (!thr->regs) 935 return; 936 937 if (!MSR_TM_ACTIVE(thr->regs->msr)) 938 goto out_and_saveregs; 939 940 WARN_ON(tm_suspend_disabled); 941 942 TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, " 943 "ccr=%lx, msr=%lx, trap=%lx)\n", 944 tsk->pid, thr->regs->nip, 945 thr->regs->ccr, thr->regs->msr, 946 thr->regs->trap); 947 948 tm_reclaim_thread(thr, TM_CAUSE_RESCHED); 949 950 TM_DEBUG("--- tm_reclaim on pid %d complete\n", 951 tsk->pid); 952 953 out_and_saveregs: 954 /* Always save the regs here, even if a transaction's not active. 955 * This context-switches a thread's TM info SPRs. We do it here to 956 * be consistent with the restore path (in recheckpoint) which 957 * cannot happen later in _switch(). 958 */ 959 tm_save_sprs(thr); 960 } 961 962 extern void __tm_recheckpoint(struct thread_struct *thread); 963 964 void tm_recheckpoint(struct thread_struct *thread) 965 { 966 unsigned long flags; 967 968 if (!(thread->regs->msr & MSR_TM)) 969 return; 970 971 /* We really can't be interrupted here as the TEXASR registers can't 972 * change and later in the trecheckpoint code, we have a userspace R1. 973 * So let's hard disable over this region. 974 */ 975 local_irq_save(flags); 976 hard_irq_disable(); 977 978 /* The TM SPRs are restored here, so that TEXASR.FS can be set 979 * before the trecheckpoint and no explosion occurs. 980 */ 981 tm_restore_sprs(thread); 982 983 __tm_recheckpoint(thread); 984 985 local_irq_restore(flags); 986 } 987 988 static inline void tm_recheckpoint_new_task(struct task_struct *new) 989 { 990 if (!cpu_has_feature(CPU_FTR_TM)) 991 return; 992 993 /* Recheckpoint the registers of the thread we're about to switch to. 994 * 995 * If the task was using FP, we non-lazily reload both the original and 996 * the speculative FP register states. This is because the kernel 997 * doesn't see if/when a TM rollback occurs, so if we take an FP 998 * unavailable later, we are unable to determine which set of FP regs 999 * need to be restored. 1000 */ 1001 if (!tm_enabled(new)) 1002 return; 1003 1004 if (!MSR_TM_ACTIVE(new->thread.regs->msr)){ 1005 tm_restore_sprs(&new->thread); 1006 return; 1007 } 1008 /* Recheckpoint to restore original checkpointed register state. 
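	 * tm_recheckpoint() below reloads the TM SPRs and reinstates the
	 * checkpointed GPR/FPR/VR contents on this CPU so the transaction
	 * can resume (or fail cleanly) once we return to userspace.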
*/ 1009 TM_DEBUG("*** tm_recheckpoint of pid %d (new->msr 0x%lx)\n", 1010 new->pid, new->thread.regs->msr); 1011 1012 tm_recheckpoint(&new->thread); 1013 1014 /* 1015 * The checkpointed state has been restored but the live state has 1016 * not, ensure all the math functionality is turned off to trigger 1017 * restore_math() to reload. 1018 */ 1019 new->thread.regs->msr &= ~(MSR_FP | MSR_VEC | MSR_VSX); 1020 1021 TM_DEBUG("*** tm_recheckpoint of pid %d complete " 1022 "(kernel msr 0x%lx)\n", 1023 new->pid, mfmsr()); 1024 } 1025 1026 static inline void __switch_to_tm(struct task_struct *prev, 1027 struct task_struct *new) 1028 { 1029 if (cpu_has_feature(CPU_FTR_TM)) { 1030 if (tm_enabled(prev) || tm_enabled(new)) 1031 tm_enable(); 1032 1033 if (tm_enabled(prev)) { 1034 prev->thread.load_tm++; 1035 tm_reclaim_task(prev); 1036 if (!MSR_TM_ACTIVE(prev->thread.regs->msr) && prev->thread.load_tm == 0) 1037 prev->thread.regs->msr &= ~MSR_TM; 1038 } 1039 1040 tm_recheckpoint_new_task(new); 1041 } 1042 } 1043 1044 /* 1045 * This is called if we are on the way out to userspace and the 1046 * TIF_RESTORE_TM flag is set. It checks if we need to reload 1047 * FP and/or vector state and does so if necessary. 1048 * If userspace is inside a transaction (whether active or 1049 * suspended) and FP/VMX/VSX instructions have ever been enabled 1050 * inside that transaction, then we have to keep them enabled 1051 * and keep the FP/VMX/VSX state loaded while ever the transaction 1052 * continues. The reason is that if we didn't, and subsequently 1053 * got a FP/VMX/VSX unavailable interrupt inside a transaction, 1054 * we don't know whether it's the same transaction, and thus we 1055 * don't know which of the checkpointed state and the transactional 1056 * state to use. 1057 */ 1058 void restore_tm_state(struct pt_regs *regs) 1059 { 1060 unsigned long msr_diff; 1061 1062 /* 1063 * This is the only moment we should clear TIF_RESTORE_TM as 1064 * it is here that ckpt_regs.msr and pt_regs.msr become the same 1065 * again, anything else could lead to an incorrect ckpt_msr being 1066 * saved and therefore incorrect signal contexts. 1067 */ 1068 clear_thread_flag(TIF_RESTORE_TM); 1069 if (!MSR_TM_ACTIVE(regs->msr)) 1070 return; 1071 1072 msr_diff = current->thread.ckpt_regs.msr & ~regs->msr; 1073 msr_diff &= MSR_FP | MSR_VEC | MSR_VSX; 1074 1075 /* Ensure that restore_math() will restore */ 1076 if (msr_diff & MSR_FP) 1077 current->thread.load_fp = 1; 1078 #ifdef CONFIG_ALTIVEC 1079 if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC) 1080 current->thread.load_vec = 1; 1081 #endif 1082 restore_math(regs); 1083 1084 regs->msr |= msr_diff; 1085 } 1086 1087 #else 1088 #define tm_recheckpoint_new_task(new) 1089 #define __switch_to_tm(prev, new) 1090 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 1091 1092 static inline void save_sprs(struct thread_struct *t) 1093 { 1094 #ifdef CONFIG_ALTIVEC 1095 if (cpu_has_feature(CPU_FTR_ALTIVEC)) 1096 t->vrsave = mfspr(SPRN_VRSAVE); 1097 #endif 1098 #ifdef CONFIG_PPC_BOOK3S_64 1099 if (cpu_has_feature(CPU_FTR_DSCR)) 1100 t->dscr = mfspr(SPRN_DSCR); 1101 1102 if (cpu_has_feature(CPU_FTR_ARCH_207S)) { 1103 t->bescr = mfspr(SPRN_BESCR); 1104 t->ebbhr = mfspr(SPRN_EBBHR); 1105 t->ebbrr = mfspr(SPRN_EBBRR); 1106 1107 t->fscr = mfspr(SPRN_FSCR); 1108 1109 /* 1110 * Note that the TAR is not available for use in the kernel. 1111 * (To provide this, the TAR should be backed up/restored on 1112 * exception entry/exit instead, and be in pt_regs. 
FIXME, 1113 * this should be in pt_regs anyway (for debug).) 1114 */ 1115 t->tar = mfspr(SPRN_TAR); 1116 } 1117 #endif 1118 1119 thread_pkey_regs_save(t); 1120 } 1121 1122 static inline void restore_sprs(struct thread_struct *old_thread, 1123 struct thread_struct *new_thread) 1124 { 1125 #ifdef CONFIG_ALTIVEC 1126 if (cpu_has_feature(CPU_FTR_ALTIVEC) && 1127 old_thread->vrsave != new_thread->vrsave) 1128 mtspr(SPRN_VRSAVE, new_thread->vrsave); 1129 #endif 1130 #ifdef CONFIG_PPC_BOOK3S_64 1131 if (cpu_has_feature(CPU_FTR_DSCR)) { 1132 u64 dscr = get_paca()->dscr_default; 1133 if (new_thread->dscr_inherit) 1134 dscr = new_thread->dscr; 1135 1136 if (old_thread->dscr != dscr) 1137 mtspr(SPRN_DSCR, dscr); 1138 } 1139 1140 if (cpu_has_feature(CPU_FTR_ARCH_207S)) { 1141 if (old_thread->bescr != new_thread->bescr) 1142 mtspr(SPRN_BESCR, new_thread->bescr); 1143 if (old_thread->ebbhr != new_thread->ebbhr) 1144 mtspr(SPRN_EBBHR, new_thread->ebbhr); 1145 if (old_thread->ebbrr != new_thread->ebbrr) 1146 mtspr(SPRN_EBBRR, new_thread->ebbrr); 1147 1148 if (old_thread->fscr != new_thread->fscr) 1149 mtspr(SPRN_FSCR, new_thread->fscr); 1150 1151 if (old_thread->tar != new_thread->tar) 1152 mtspr(SPRN_TAR, new_thread->tar); 1153 } 1154 1155 if (cpu_has_feature(CPU_FTR_P9_TIDR) && 1156 old_thread->tidr != new_thread->tidr) 1157 mtspr(SPRN_TIDR, new_thread->tidr); 1158 #endif 1159 1160 thread_pkey_regs_restore(new_thread, old_thread); 1161 } 1162 1163 #ifdef CONFIG_PPC_BOOK3S_64 1164 #define CP_SIZE 128 1165 static const u8 dummy_copy_buffer[CP_SIZE] __attribute__((aligned(CP_SIZE))); 1166 #endif 1167 1168 struct task_struct *__switch_to(struct task_struct *prev, 1169 struct task_struct *new) 1170 { 1171 struct thread_struct *new_thread, *old_thread; 1172 struct task_struct *last; 1173 #ifdef CONFIG_PPC_BOOK3S_64 1174 struct ppc64_tlb_batch *batch; 1175 #endif 1176 1177 new_thread = &new->thread; 1178 old_thread = ¤t->thread; 1179 1180 WARN_ON(!irqs_disabled()); 1181 1182 #ifdef CONFIG_PPC_BOOK3S_64 1183 batch = this_cpu_ptr(&ppc64_tlb_batch); 1184 if (batch->active) { 1185 current_thread_info()->local_flags |= _TLF_LAZY_MMU; 1186 if (batch->index) 1187 __flush_tlb_pending(batch); 1188 batch->active = 0; 1189 } 1190 #endif /* CONFIG_PPC_BOOK3S_64 */ 1191 1192 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 1193 switch_booke_debug_regs(&new->thread.debug); 1194 #else 1195 /* 1196 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would 1197 * schedule DABR 1198 */ 1199 #ifndef CONFIG_HAVE_HW_BREAKPOINT 1200 if (unlikely(!hw_brk_match(this_cpu_ptr(¤t_brk), &new->thread.hw_brk))) 1201 __set_breakpoint(&new->thread.hw_brk); 1202 #endif /* CONFIG_HAVE_HW_BREAKPOINT */ 1203 #endif 1204 1205 /* 1206 * We need to save SPRs before treclaim/trecheckpoint as these will 1207 * change a number of them. 1208 */ 1209 save_sprs(&prev->thread); 1210 1211 /* Save FPU, Altivec, VSX and SPE state */ 1212 giveup_all(prev); 1213 1214 __switch_to_tm(prev, new); 1215 1216 if (!radix_enabled()) { 1217 /* 1218 * We can't take a PMU exception inside _switch() since there 1219 * is a window where the kernel stack SLB and the kernel stack 1220 * are out of sync. Hard disable here. 1221 */ 1222 hard_irq_disable(); 1223 } 1224 1225 /* 1226 * Call restore_sprs() before calling _switch(). If we move it after 1227 * _switch() then we miss out on calling it for new tasks. The reason 1228 * for this is we manually create a stack frame for new tasks that 1229 * directly returns through ret_from_fork() or 1230 * ret_from_kernel_thread(). 
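	 * Those hand-built frames never run the code that follows _switch()
	 * on their first switch-in, so calling restore_sprs() any later
	 * would leave a brand-new task running with the old task's SPRs.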
See copy_thread() for details. 1231 */ 1232 restore_sprs(old_thread, new_thread); 1233 1234 last = _switch(old_thread, new_thread); 1235 1236 #ifdef CONFIG_PPC_BOOK3S_64 1237 if (current_thread_info()->local_flags & _TLF_LAZY_MMU) { 1238 current_thread_info()->local_flags &= ~_TLF_LAZY_MMU; 1239 batch = this_cpu_ptr(&ppc64_tlb_batch); 1240 batch->active = 1; 1241 } 1242 1243 if (current_thread_info()->task->thread.regs) { 1244 restore_math(current_thread_info()->task->thread.regs); 1245 1246 /* 1247 * The copy-paste buffer can only store into foreign real 1248 * addresses, so unprivileged processes can not see the 1249 * data or use it in any way unless they have foreign real 1250 * mappings. If the new process has the foreign real address 1251 * mappings, we must issue a cp_abort to clear any state and 1252 * prevent snooping, corruption or a covert channel. 1253 */ 1254 if (current_thread_info()->task->thread.used_vas) 1255 asm volatile(PPC_CP_ABORT); 1256 } 1257 #endif /* CONFIG_PPC_BOOK3S_64 */ 1258 1259 return last; 1260 } 1261 1262 static int instructions_to_print = 16; 1263 1264 static void show_instructions(struct pt_regs *regs) 1265 { 1266 int i; 1267 unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 * 1268 sizeof(int)); 1269 1270 printk("Instruction dump:"); 1271 1272 for (i = 0; i < instructions_to_print; i++) { 1273 int instr; 1274 1275 if (!(i % 8)) 1276 pr_cont("\n"); 1277 1278 #if !defined(CONFIG_BOOKE) 1279 /* If executing with the IMMU off, adjust pc rather 1280 * than print XXXXXXXX. 1281 */ 1282 if (!(regs->msr & MSR_IR)) 1283 pc = (unsigned long)phys_to_virt(pc); 1284 #endif 1285 1286 if (!__kernel_text_address(pc) || 1287 probe_kernel_address((unsigned int __user *)pc, instr)) { 1288 pr_cont("XXXXXXXX "); 1289 } else { 1290 if (regs->nip == pc) 1291 pr_cont("<%08x> ", instr); 1292 else 1293 pr_cont("%08x ", instr); 1294 } 1295 1296 pc += sizeof(int); 1297 } 1298 1299 pr_cont("\n"); 1300 } 1301 1302 void show_user_instructions(struct pt_regs *regs) 1303 { 1304 unsigned long pc; 1305 int i; 1306 1307 pc = regs->nip - (instructions_to_print * 3 / 4 * sizeof(int)); 1308 1309 /* 1310 * Make sure the NIP points at userspace, not kernel text/data or 1311 * elsewhere. 
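	 * The single __access_ok() check below covers the whole range we are
	 * about to dump (instructions_to_print * sizeof(int) bytes starting
	 * at pc), so the loop itself needs no per-word range checks.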
1312 */ 1313 if (!__access_ok(pc, instructions_to_print * sizeof(int), USER_DS)) { 1314 pr_info("%s[%d]: Bad NIP, not dumping instructions.\n", 1315 current->comm, current->pid); 1316 return; 1317 } 1318 1319 pr_info("%s[%d]: code: ", current->comm, current->pid); 1320 1321 for (i = 0; i < instructions_to_print; i++) { 1322 int instr; 1323 1324 if (!(i % 8) && (i > 0)) { 1325 pr_cont("\n"); 1326 pr_info("%s[%d]: code: ", current->comm, current->pid); 1327 } 1328 1329 if (probe_kernel_address((unsigned int __user *)pc, instr)) { 1330 pr_cont("XXXXXXXX "); 1331 } else { 1332 if (regs->nip == pc) 1333 pr_cont("<%08x> ", instr); 1334 else 1335 pr_cont("%08x ", instr); 1336 } 1337 1338 pc += sizeof(int); 1339 } 1340 1341 pr_cont("\n"); 1342 } 1343 1344 struct regbit { 1345 unsigned long bit; 1346 const char *name; 1347 }; 1348 1349 static struct regbit msr_bits[] = { 1350 #if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE) 1351 {MSR_SF, "SF"}, 1352 {MSR_HV, "HV"}, 1353 #endif 1354 {MSR_VEC, "VEC"}, 1355 {MSR_VSX, "VSX"}, 1356 #ifdef CONFIG_BOOKE 1357 {MSR_CE, "CE"}, 1358 #endif 1359 {MSR_EE, "EE"}, 1360 {MSR_PR, "PR"}, 1361 {MSR_FP, "FP"}, 1362 {MSR_ME, "ME"}, 1363 #ifdef CONFIG_BOOKE 1364 {MSR_DE, "DE"}, 1365 #else 1366 {MSR_SE, "SE"}, 1367 {MSR_BE, "BE"}, 1368 #endif 1369 {MSR_IR, "IR"}, 1370 {MSR_DR, "DR"}, 1371 {MSR_PMM, "PMM"}, 1372 #ifndef CONFIG_BOOKE 1373 {MSR_RI, "RI"}, 1374 {MSR_LE, "LE"}, 1375 #endif 1376 {0, NULL} 1377 }; 1378 1379 static void print_bits(unsigned long val, struct regbit *bits, const char *sep) 1380 { 1381 const char *s = ""; 1382 1383 for (; bits->bit; ++bits) 1384 if (val & bits->bit) { 1385 pr_cont("%s%s", s, bits->name); 1386 s = sep; 1387 } 1388 } 1389 1390 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1391 static struct regbit msr_tm_bits[] = { 1392 {MSR_TS_T, "T"}, 1393 {MSR_TS_S, "S"}, 1394 {MSR_TM, "E"}, 1395 {0, NULL} 1396 }; 1397 1398 static void print_tm_bits(unsigned long val) 1399 { 1400 /* 1401 * This only prints something if at least one of the TM bit is set. 
1402 * Inside the TM[], the output means: 1403 * E: Enabled (bit 32) 1404 * S: Suspended (bit 33) 1405 * T: Transactional (bit 34) 1406 */ 1407 if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) { 1408 pr_cont(",TM["); 1409 print_bits(val, msr_tm_bits, ""); 1410 pr_cont("]"); 1411 } 1412 } 1413 #else 1414 static void print_tm_bits(unsigned long val) {} 1415 #endif 1416 1417 static void print_msr_bits(unsigned long val) 1418 { 1419 pr_cont("<"); 1420 print_bits(val, msr_bits, ","); 1421 print_tm_bits(val); 1422 pr_cont(">"); 1423 } 1424 1425 #ifdef CONFIG_PPC64 1426 #define REG "%016lx" 1427 #define REGS_PER_LINE 4 1428 #define LAST_VOLATILE 13 1429 #else 1430 #define REG "%08lx" 1431 #define REGS_PER_LINE 8 1432 #define LAST_VOLATILE 12 1433 #endif 1434 1435 void show_regs(struct pt_regs * regs) 1436 { 1437 int i, trap; 1438 1439 show_regs_print_info(KERN_DEFAULT); 1440 1441 printk("NIP: "REG" LR: "REG" CTR: "REG"\n", 1442 regs->nip, regs->link, regs->ctr); 1443 printk("REGS: %px TRAP: %04lx %s (%s)\n", 1444 regs, regs->trap, print_tainted(), init_utsname()->release); 1445 printk("MSR: "REG" ", regs->msr); 1446 print_msr_bits(regs->msr); 1447 pr_cont(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); 1448 trap = TRAP(regs); 1449 if ((TRAP(regs) != 0xc00) && cpu_has_feature(CPU_FTR_CFAR)) 1450 pr_cont("CFAR: "REG" ", regs->orig_gpr3); 1451 if (trap == 0x200 || trap == 0x300 || trap == 0x600) 1452 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) 1453 pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr); 1454 #else 1455 pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr); 1456 #endif 1457 #ifdef CONFIG_PPC64 1458 pr_cont("IRQMASK: %lx ", regs->softe); 1459 #endif 1460 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1461 if (MSR_TM_ACTIVE(regs->msr)) 1462 pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch); 1463 #endif 1464 1465 for (i = 0; i < 32; i++) { 1466 if ((i % REGS_PER_LINE) == 0) 1467 pr_cont("\nGPR%02d: ", i); 1468 pr_cont(REG " ", regs->gpr[i]); 1469 if (i == LAST_VOLATILE && !FULL_REGS(regs)) 1470 break; 1471 } 1472 pr_cont("\n"); 1473 #ifdef CONFIG_KALLSYMS 1474 /* 1475 * Lookup NIP late so we have the best change of getting the 1476 * above info out without failing 1477 */ 1478 printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip); 1479 printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link); 1480 #endif 1481 show_stack(current, (unsigned long *) regs->gpr[1]); 1482 if (!user_mode(regs)) 1483 show_instructions(regs); 1484 } 1485 1486 void flush_thread(void) 1487 { 1488 #ifdef CONFIG_HAVE_HW_BREAKPOINT 1489 flush_ptrace_hw_breakpoint(current); 1490 #else /* CONFIG_HAVE_HW_BREAKPOINT */ 1491 set_debug_reg_defaults(¤t->thread); 1492 #endif /* CONFIG_HAVE_HW_BREAKPOINT */ 1493 } 1494 1495 int set_thread_uses_vas(void) 1496 { 1497 #ifdef CONFIG_PPC_BOOK3S_64 1498 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 1499 return -EINVAL; 1500 1501 current->thread.used_vas = 1; 1502 1503 /* 1504 * Even a process that has no foreign real address mapping can use 1505 * an unpaired COPY instruction (to no real effect). Issue CP_ABORT 1506 * to clear any pending COPY and prevent a covert channel. 1507 * 1508 * __switch_to() will issue CP_ABORT on future context switches. 1509 */ 1510 asm volatile(PPC_CP_ABORT); 1511 1512 #endif /* CONFIG_PPC_BOOK3S_64 */ 1513 return 0; 1514 } 1515 1516 #ifdef CONFIG_PPC64 1517 /** 1518 * Assign a TIDR (thread ID) for task @t and set it in the thread 1519 * structure. For now, we only support setting TIDR for 'current' task. 
 *
 * Since the TID value is a truncated form of its PID, it is possible
 * (but unlikely) for 2 threads to have the same TID. In the unlikely event
 * that 2 threads share the same TID and are waiting, one of the following
 * cases will happen:
 *
 * 1. The correct thread is running, the wrong thread is not
 * In this situation, the correct thread is woken and proceeds to pass its
 * condition check.
 *
 * 2. Neither thread is running
 * In this situation, neither thread will be woken. When scheduled, the waiting
 * threads will execute either a wait, which will return immediately, followed
 * by a condition check, which will pass for the correct thread and fail
 * for the wrong thread, or they will execute the condition check immediately.
 *
 * 3. The wrong thread is running, the correct thread is not
 * The wrong thread will be woken, but will fail its condition check and
 * re-execute wait. The correct thread, when scheduled, will execute either
 * its condition check (which will pass), or wait, which returns immediately
 * when called the first time after the thread is scheduled, followed by its
 * condition check (which will pass).
 *
 * 4. Both threads are running
 * Both threads will be woken. The wrong thread will fail its condition check
 * and execute another wait, while the correct thread will pass its condition
 * check.
 *
 * @t: the task to set the thread ID for
 */
int set_thread_tidr(struct task_struct *t)
{
	if (!cpu_has_feature(CPU_FTR_P9_TIDR))
		return -EINVAL;

	if (t != current)
		return -EINVAL;

	if (t->thread.tidr)
		return 0;

	t->thread.tidr = (u16)task_pid_nr(t);
	mtspr(SPRN_TIDR, t->thread.tidr);

	return 0;
}
EXPORT_SYMBOL_GPL(set_thread_tidr);

#endif /* CONFIG_PPC64 */

void
release_thread(struct task_struct *t)
{
}

/*
 * This gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	flush_all_to_thread(src);
	/*
	 * Flush TM state out so we can copy it. __switch_to_tm() does this
	 * flush but it removes the checkpointed state from the current CPU and
	 * transitions the CPU out of TM mode. Hence we need to call
	 * tm_recheckpoint_new_task() (on the same task) to restore the
	 * checkpointed state back and the TM mode.
	 *
	 * Can't pass dst because it isn't ready. Doesn't matter, passing
	 * dst is only important for __switch_to().
	 */
	__switch_to_tm(src, src);

	*dst = *src;

	clear_task_ebb(dst);

	return 0;
}

static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
{
#ifdef CONFIG_PPC_BOOK3S_64
	unsigned long sp_vsid;
	unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

	if (radix_enabled())
		return;

	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
			<< SLB_VSID_SHIFT_1T;
	else
		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
			<< SLB_VSID_SHIFT;
	sp_vsid |= SLB_VSID_KERNEL | llp;
	p->thread.ksp_vsid = sp_vsid;
#endif
}

/*
 * Copy a thread..
1623 */ 1624 1625 /* 1626 * Copy architecture-specific thread state 1627 */ 1628 int copy_thread(unsigned long clone_flags, unsigned long usp, 1629 unsigned long kthread_arg, struct task_struct *p) 1630 { 1631 struct pt_regs *childregs, *kregs; 1632 extern void ret_from_fork(void); 1633 extern void ret_from_kernel_thread(void); 1634 void (*f)(void); 1635 unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE; 1636 struct thread_info *ti = task_thread_info(p); 1637 1638 klp_init_thread_info(ti); 1639 1640 /* Copy registers */ 1641 sp -= sizeof(struct pt_regs); 1642 childregs = (struct pt_regs *) sp; 1643 if (unlikely(p->flags & PF_KTHREAD)) { 1644 /* kernel thread */ 1645 memset(childregs, 0, sizeof(struct pt_regs)); 1646 childregs->gpr[1] = sp + sizeof(struct pt_regs); 1647 /* function */ 1648 if (usp) 1649 childregs->gpr[14] = ppc_function_entry((void *)usp); 1650 #ifdef CONFIG_PPC64 1651 clear_tsk_thread_flag(p, TIF_32BIT); 1652 childregs->softe = IRQS_ENABLED; 1653 #endif 1654 childregs->gpr[15] = kthread_arg; 1655 p->thread.regs = NULL; /* no user register state */ 1656 ti->flags |= _TIF_RESTOREALL; 1657 f = ret_from_kernel_thread; 1658 } else { 1659 /* user thread */ 1660 struct pt_regs *regs = current_pt_regs(); 1661 CHECK_FULL_REGS(regs); 1662 *childregs = *regs; 1663 if (usp) 1664 childregs->gpr[1] = usp; 1665 p->thread.regs = childregs; 1666 childregs->gpr[3] = 0; /* Result from fork() */ 1667 if (clone_flags & CLONE_SETTLS) { 1668 #ifdef CONFIG_PPC64 1669 if (!is_32bit_task()) 1670 childregs->gpr[13] = childregs->gpr[6]; 1671 else 1672 #endif 1673 childregs->gpr[2] = childregs->gpr[6]; 1674 } 1675 1676 f = ret_from_fork; 1677 } 1678 childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX); 1679 sp -= STACK_FRAME_OVERHEAD; 1680 1681 /* 1682 * The way this works is that at some point in the future 1683 * some task will call _switch to switch to the new task. 1684 * That will pop off the stack frame created below and start 1685 * the new task running at ret_from_fork. The new task will 1686 * do some house keeping and then return from the fork or clone 1687 * system call, using the stack frame created above. 1688 */ 1689 ((unsigned long *)sp)[0] = 0; 1690 sp -= sizeof(struct pt_regs); 1691 kregs = (struct pt_regs *) sp; 1692 sp -= STACK_FRAME_OVERHEAD; 1693 p->thread.ksp = sp; 1694 #ifdef CONFIG_PPC32 1695 p->thread.ksp_limit = (unsigned long)task_stack_page(p) + 1696 _ALIGN_UP(sizeof(struct thread_info), 16); 1697 #endif 1698 #ifdef CONFIG_HAVE_HW_BREAKPOINT 1699 p->thread.ptrace_bps[0] = NULL; 1700 #endif 1701 1702 p->thread.fp_save_area = NULL; 1703 #ifdef CONFIG_ALTIVEC 1704 p->thread.vr_save_area = NULL; 1705 #endif 1706 1707 setup_ksp_vsid(p, sp); 1708 1709 #ifdef CONFIG_PPC64 1710 if (cpu_has_feature(CPU_FTR_DSCR)) { 1711 p->thread.dscr_inherit = current->thread.dscr_inherit; 1712 p->thread.dscr = mfspr(SPRN_DSCR); 1713 } 1714 if (cpu_has_feature(CPU_FTR_HAS_PPR)) 1715 p->thread.ppr = INIT_PPR; 1716 1717 p->thread.tidr = 0; 1718 #endif 1719 kregs->nip = ppc_function_entry(f); 1720 return 0; 1721 } 1722 1723 /* 1724 * Set up a thread for executing a new program 1725 */ 1726 void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) 1727 { 1728 #ifdef CONFIG_PPC64 1729 unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */ 1730 #endif 1731 1732 /* 1733 * If we exec out of a kernel thread then thread.regs will not be 1734 * set. Do it now. 
1735 */ 1736 if (!current->thread.regs) { 1737 struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE; 1738 current->thread.regs = regs - 1; 1739 } 1740 1741 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1742 /* 1743 * Clear any transactional state, we're exec()ing. The cause is 1744 * not important as there will never be a recheckpoint so it's not 1745 * user visible. 1746 */ 1747 if (MSR_TM_SUSPENDED(mfmsr())) 1748 tm_reclaim_current(0); 1749 #endif 1750 1751 memset(regs->gpr, 0, sizeof(regs->gpr)); 1752 regs->ctr = 0; 1753 regs->link = 0; 1754 regs->xer = 0; 1755 regs->ccr = 0; 1756 regs->gpr[1] = sp; 1757 1758 /* 1759 * We have just cleared all the nonvolatile GPRs, so make 1760 * FULL_REGS(regs) return true. This is necessary to allow 1761 * ptrace to examine the thread immediately after exec. 1762 */ 1763 regs->trap &= ~1UL; 1764 1765 #ifdef CONFIG_PPC32 1766 regs->mq = 0; 1767 regs->nip = start; 1768 regs->msr = MSR_USER; 1769 #else 1770 if (!is_32bit_task()) { 1771 unsigned long entry; 1772 1773 if (is_elf2_task()) { 1774 /* Look ma, no function descriptors! */ 1775 entry = start; 1776 1777 /* 1778 * Ulrich says: 1779 * The latest iteration of the ABI requires that when 1780 * calling a function (at its global entry point), 1781 * the caller must ensure r12 holds the entry point 1782 * address (so that the function can quickly 1783 * establish addressability). 1784 */ 1785 regs->gpr[12] = start; 1786 /* Make sure that's restored on entry to userspace. */ 1787 set_thread_flag(TIF_RESTOREALL); 1788 } else { 1789 unsigned long toc; 1790 1791 /* start is a relocated pointer to the function 1792 * descriptor for the elf _start routine. The first 1793 * entry in the function descriptor is the entry 1794 * address of _start and the second entry is the TOC 1795 * value we need to use. 1796 */ 1797 __get_user(entry, (unsigned long __user *)start); 1798 __get_user(toc, (unsigned long __user *)start+1); 1799 1800 /* Check whether the e_entry function descriptor entries 1801 * need to be relocated before we can use them. 
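			 * load_addr is the relocation offset the ELF loader
			 * left in r2 (saved by ELF_PLAT_INIT above); it is
			 * non-zero when the entry descriptor was mapped at an
			 * offset, e.g. when entering via the dynamic linker
			 * or a PIE, so entry and TOC need the same adjustment.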
			 */
			if (load_addr != 0) {
				entry += load_addr;
				toc += load_addr;
			}
			regs->gpr[2] = toc;
		}
		regs->nip = entry;
		regs->msr = MSR_USER64;
	} else {
		regs->nip = start;
		regs->gpr[2] = 0;
		regs->msr = MSR_USER32;
	}
#endif
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	current->thread.load_fp = 0;
	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
	current->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
	memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
	current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
	current->thread.vr_save_area = NULL;
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
	current->thread.load_vec = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	current->thread.tm_tfhar = 0;
	current->thread.tm_texasr = 0;
	current->thread.tm_tfiar = 0;
	current->thread.load_tm = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

	thread_pkey_regs_init(&current->thread);
}
EXPORT_SYMBOL(start_thread);

#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			/*
			 * When the sticky exception bits are set
			 * directly by userspace, it must call prctl
			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
			 * in the existing prctl settings) or
			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
			 * the bits being set). <fenv.h> functions
			 * saving and restoring the whole
			 * floating-point environment need to do so
			 * anyway to restore the prctl settings from
			 * the saved environment.
			 */
			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
			return 0;
		} else {
			return -EINVAL;
		}
#else
		return -EINVAL;
#endif
	}

	/* On a CONFIG_SPE CPU this does not hurt us.  The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE.
Additionally, the MSR[FE0,FE1] bits 1889 * on CONFIG_SPE implementations are reserved so writing to 1890 * them does not change anything */ 1891 if (val > PR_FP_EXC_PRECISE) 1892 return -EINVAL; 1893 tsk->thread.fpexc_mode = __pack_fe01(val); 1894 if (regs != NULL && (regs->msr & MSR_FP) != 0) 1895 regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1)) 1896 | tsk->thread.fpexc_mode; 1897 return 0; 1898 } 1899 1900 int get_fpexc_mode(struct task_struct *tsk, unsigned long adr) 1901 { 1902 unsigned int val; 1903 1904 if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) 1905 #ifdef CONFIG_SPE 1906 if (cpu_has_feature(CPU_FTR_SPE)) { 1907 /* 1908 * When the sticky exception bits are set 1909 * directly by userspace, it must call prctl 1910 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE 1911 * in the existing prctl settings) or 1912 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in 1913 * the bits being set). <fenv.h> functions 1914 * saving and restoring the whole 1915 * floating-point environment need to do so 1916 * anyway to restore the prctl settings from 1917 * the saved environment. 1918 */ 1919 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR); 1920 val = tsk->thread.fpexc_mode; 1921 } else 1922 return -EINVAL; 1923 #else 1924 return -EINVAL; 1925 #endif 1926 else 1927 val = __unpack_fe01(tsk->thread.fpexc_mode); 1928 return put_user(val, (unsigned int __user *) adr); 1929 } 1930 1931 int set_endian(struct task_struct *tsk, unsigned int val) 1932 { 1933 struct pt_regs *regs = tsk->thread.regs; 1934 1935 if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) || 1936 (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE))) 1937 return -EINVAL; 1938 1939 if (regs == NULL) 1940 return -EINVAL; 1941 1942 if (val == PR_ENDIAN_BIG) 1943 regs->msr &= ~MSR_LE; 1944 else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE) 1945 regs->msr |= MSR_LE; 1946 else 1947 return -EINVAL; 1948 1949 return 0; 1950 } 1951 1952 int get_endian(struct task_struct *tsk, unsigned long adr) 1953 { 1954 struct pt_regs *regs = tsk->thread.regs; 1955 unsigned int val; 1956 1957 if (!cpu_has_feature(CPU_FTR_PPC_LE) && 1958 !cpu_has_feature(CPU_FTR_REAL_LE)) 1959 return -EINVAL; 1960 1961 if (regs == NULL) 1962 return -EINVAL; 1963 1964 if (regs->msr & MSR_LE) { 1965 if (cpu_has_feature(CPU_FTR_REAL_LE)) 1966 val = PR_ENDIAN_LITTLE; 1967 else 1968 val = PR_ENDIAN_PPC_LITTLE; 1969 } else 1970 val = PR_ENDIAN_BIG; 1971 1972 return put_user(val, (unsigned int __user *)adr); 1973 } 1974 1975 int set_unalign_ctl(struct task_struct *tsk, unsigned int val) 1976 { 1977 tsk->thread.align_ctl = val; 1978 return 0; 1979 } 1980 1981 int get_unalign_ctl(struct task_struct *tsk, unsigned long adr) 1982 { 1983 return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr); 1984 } 1985 1986 static inline int valid_irq_stack(unsigned long sp, struct task_struct *p, 1987 unsigned long nbytes) 1988 { 1989 unsigned long stack_page; 1990 unsigned long cpu = task_cpu(p); 1991 1992 /* 1993 * Avoid crashing if the stack has overflowed and corrupted 1994 * task_cpu(p), which is in the thread_info struct. 
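	 * That is why cpu is sanity-checked against NR_CPUS and cpu_possible()
	 * below before it is used to index hardirq_ctx[] and softirq_ctx[].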
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/*
	 * Avoid crashing if the stack has overflowed and corrupted
	 * task_cpu(p), which is in the thread_info struct.
	 */
	if (cpu < NR_CPUS && cpu_possible(cpu)) {
		stack_page = (unsigned long) hardirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;

		stack_page = (unsigned long) softirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;
	}
	return 0;
}

int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
		    p->state == TASK_RUNNING)
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}

static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int curr_frame = current->curr_ret_stack;
	extern void return_to_handler(void);
	unsigned long rth = (unsigned long)return_to_handler;
#endif

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			sp = current_stack_pointer();
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			if ((ip == rth) && curr_frame >= 0) {
				pr_cont(" (%pS)",
					(void *)current->ret_stack[curr_frame].ret);
				curr_frame--;
			}
#endif
			if (firstframe)
				pr_cont(" (unreliable)");
			pr_cont("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			lr = regs->link;
			printk("--- interrupt: %lx at %pS\n    LR = %pS\n",
			       regs->trap, (void *)regs->nip, (void *)lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);
}
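
/*
 * Run-latch helpers: CTRL[RUN] is a hint to the hardware (and hypervisor)
 * about whether this thread is doing useful work.  The ppc64_runlatch_on()/
 * ppc64_runlatch_off() wrappers in <asm/runlatch.h> call the functions
 * below, typically from the idle loop, so the latch is dropped while the
 * CPU is idle and restored when there is work to run.
 */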
#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void notrace __ppc64_runlatch_on(void)
{
	struct thread_info *ti = current_thread_info();

	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		/*
		 * Least significant bit (RUN) is the only writable bit of
		 * the CTRL register, so we can avoid mfspr. 2.06 is not the
		 * earliest ISA where this is the case, but it's convenient.
		 */
		mtspr(SPRN_CTRLT, CTRL_RUNLATCH);
	} else {
		unsigned long ctrl;

		/*
		 * Some architectures (e.g., Cell) have writable fields other
		 * than RUN, so do the read-modify-write.
		 */
		ctrl = mfspr(SPRN_CTRLF);
		ctrl |= CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);
	}

	ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
void notrace __ppc64_runlatch_off(void)
{
	struct thread_info *ti = current_thread_info();

	ti->local_flags &= ~_TLF_RUNLATCH;

	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		mtspr(SPRN_CTRLT, 0);
	} else {
		unsigned long ctrl;

		ctrl = mfspr(SPRN_CTRLF);
		ctrl &= ~CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);
	}
}
#endif /* CONFIG_PPC64 */

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = 0;

	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
	else
		rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));

	return rnd << PAGE_SHIFT;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * If we are using 1TB segments and we are allowed to randomise
	 * the heap, we can put it above 1TB so it is backed by a 1TB
	 * segment. Otherwise the heap will be in the bottom 1TB
	 * which always uses 256MB segments and this may result in a
	 * performance penalty. We don't need to worry about radix. For
	 * radix, mmu_highuser_ssize remains unchanged from 256MB.
	 */
	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}