// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Common boot and setup code.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 */

#include <linux/export.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/tty.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/memblock.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memory.h>
#include <linux/nmi.h>

#include <asm/debugfs.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/code-patching.h>
#include <asm/livepatch.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
#include <asm/hw_irq.h>
#include <asm/feature-fixups.h>
#include <asm/kup.h>
#include <asm/early_ioremap.h>

#include "setup.h"

int spinning_secondaries;
u64 ppc64_pft_size;

struct ppc64_caches ppc64_caches = {
	.l1d = {
		.block_size = 0x40,
		.log_block_size = 6,
	},
	.l1i = {
		.block_size = 0x40,
		.log_block_size = 6
	},
};
EXPORT_SYMBOL_GPL(ppc64_caches);

#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
void __init setup_tlb_core_data(void)
{
	int cpu;

	BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);

	for_each_possible_cpu(cpu) {
		int first = cpu_first_thread_sibling(cpu);

		/*
		 * If we boot via kdump on a non-primary thread,
		 * make sure we point at the thread that actually
		 * set up this TLB.
		 */
		if (cpu_first_thread_sibling(boot_cpuid) == first)
			first = boot_cpuid;

		paca_ptrs[cpu]->tcd_ptr = &paca_ptrs[first]->tcd;

		/*
		 * If we have threads, we need either tlbsrx.
		 * or e6500 tablewalk mode, or else TLB handlers
		 * will be racy and could produce duplicate entries.
		 * Should we panic instead?
		 */
		WARN_ONCE(smt_enabled_at_boot >= 2 &&
			  !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
			  book3e_htw_mode != PPC_HTW_E6500,
			  "%s: unsupported MMU configuration\n", __func__);
	}
}
#endif
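
/*
 * SMT bring-up policy: the smt-enabled= command line option takes
 * precedence over the firmware /options "ibm,smt-enabled" property.
 * Accepted values are "on", "off" or a thread count, which is clamped
 * to threads_per_core (e.g. smt-enabled=2 boots two threads per core).
 */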
#ifdef CONFIG_SMP

static char *smt_enabled_cmdline;

/* Look for ibm,smt-enabled OF option */
void __init check_smt_enabled(void)
{
	struct device_node *dn;
	const char *smt_option;

	/* Default to enabling all threads */
	smt_enabled_at_boot = threads_per_core;

	/* Allow the command line to overrule the OF option */
	if (smt_enabled_cmdline) {
		if (!strcmp(smt_enabled_cmdline, "on"))
			smt_enabled_at_boot = threads_per_core;
		else if (!strcmp(smt_enabled_cmdline, "off"))
			smt_enabled_at_boot = 0;
		else {
			int smt;
			int rc;

			rc = kstrtoint(smt_enabled_cmdline, 10, &smt);
			if (!rc)
				smt_enabled_at_boot =
					min(threads_per_core, smt);
		}
	} else {
		dn = of_find_node_by_path("/options");
		if (dn) {
			smt_option = of_get_property(dn, "ibm,smt-enabled",
						     NULL);

			if (smt_option) {
				if (!strcmp(smt_option, "on"))
					smt_enabled_at_boot = threads_per_core;
				else if (!strcmp(smt_option, "off"))
					smt_enabled_at_boot = 0;
			}

			of_node_put(dn);
		}
	}
}

/* Look for smt-enabled= cmdline option */
static int __init early_smt_enabled(char *p)
{
	smt_enabled_cmdline = p;
	return 0;
}
early_param("smt-enabled", early_smt_enabled);

#endif /* CONFIG_SMP */

/** Fix up paca fields required for the boot cpu */
static void __init fixup_boot_paca(void)
{
	/* The boot cpu is started */
	get_paca()->cpu_start = 1;
	/* Allow percpu accesses to work until we setup percpu data */
	get_paca()->data_offset = 0;
	/* Mark interrupts disabled in PACA */
	irq_soft_mask_set(IRQS_DISABLED);
}

static void __init configure_exceptions(void)
{
	/*
	 * Set up the trampolines from the lowmem exception vectors
	 * to the kdump kernel when not using a relocatable kernel.
	 */
	setup_kdump_trampoline();

	/* Under a PAPR hypervisor, we need hypercalls */
	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		/* Enable AIL if possible */
		pseries_enable_reloc_on_exc();

		/*
		 * Tell the hypervisor that we want our exceptions to
		 * be taken in little endian mode.
		 *
		 * We don't call this for big endian as our calling convention
		 * makes us always enter in BE, and the call may fail under
		 * some circumstances with kdump.
		 */
#ifdef __LITTLE_ENDIAN__
		pseries_little_endian_exceptions();
#endif
	} else {
		/* Set endian mode using OPAL */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			opal_configure_cores();

		/* AIL on native is done in cpu_ready_for_interrupts() */
	}
}

static void cpu_ready_for_interrupts(void)
{
	/*
	 * Enable AIL if supported, and we are in hypervisor mode. This
	 * is called once for every processor.
	 *
	 * If we are not in hypervisor mode the job is done once for
	 * the whole partition in configure_exceptions().
	 */
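	/*
	 * AIL (Alternate Interrupt Location) = 3 means interrupts are
	 * taken with relocation on, vectored at effective address
	 * 0xc000000000004000 + offset instead of the real-mode vectors.
	 */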
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S)) {
		unsigned long lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
	}

	/*
	 * Set HFSCR:TM based on CPU features:
	 * In the special case of TM no suspend (P9N DD2.1), Linux is
	 * told TM is off via the dt-ftrs but told to (partially) use
	 * it via OPAL_REINIT_CPUS_TM_SUSPEND_DISABLED. So HFSCR[TM]
	 * will be off from dt-ftrs but we need to turn it on for the
	 * no suspend case.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (cpu_has_feature(CPU_FTR_TM_COMP))
			mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) | HFSCR_TM);
		else
			mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
	}

	/* Set IR and DR in PACA MSR */
	get_paca()->kernel_msr = MSR_KERNEL;
}

unsigned long spr_default_dscr = 0;

void __init record_spr_defaults(void)
{
	if (early_cpu_has_feature(CPU_FTR_DSCR))
		spr_default_dscr = mfspr(SPRN_DSCR);
}

/*
 * Early initialization entry point. This is called by head.S
 * with MMU translation disabled. We rely on the "feature" of
 * the CPU that ignores the top 2 bits of the address in real
 * mode so we can access kernel globals normally provided we
 * only toy with things in the RMO region. From here, we do
 * some early parsing of the device-tree to set up our MEMBLOCK
 * data structures, and allocate & initialize the hash table
 * and segment tables so we can start running with translation
 * enabled.
 *
 * It is this function which will call the probe() callback of
 * the various platform types and copy the matching one to the
 * global ppc_md structure. Your platform can eventually do
 * some very early initializations from the probe() routine, but
 * this is not recommended; be very careful as, for example, the
 * device-tree is not accessible via normal means at this point.
 */

void __init __nostackprotector early_setup(unsigned long dt_ptr)
{
	static __initdata struct paca_struct boot_paca;

	/* -------- printk is _NOT_ safe to use here ! ------- */

	/*
	 * Assume we're on cpu 0 for now.
	 *
	 * We need to load a PACA very early for a few reasons.
	 *
	 * The stack protector canary is stored in the paca, so as soon as we
	 * call any stack protected code we need r13 pointing somewhere valid.
	 *
	 * If we are using kcov it will call in_task() in its instrumentation,
	 * which relies on the current task from the PACA.
	 *
	 * dt_cpu_ftrs_init() calls into generic OF/fdt code, as well as
	 * printk(), which can trigger both stack protector and kcov.
	 *
	 * percpu variables and spin locks also use the paca.
	 *
	 * So set up a temporary paca. It will be replaced below once we know
	 * what CPU we are on.
	 */
	initialise_paca(&boot_paca, 0);
	setup_paca(&boot_paca);
	fixup_boot_paca();

	/* -------- printk is now safe to use ------- */

	/* Try new device tree based feature discovery ... */
	if (!dt_cpu_ftrs_init(__va(dt_ptr)))
		/* Otherwise use the old style CPU table */
		identify_cpu(0, mfspr(SPRN_PVR));

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	udbg_printf(" -> %s(), dt_ptr: 0x%lx\n", __func__, dt_ptr);

	/*
	 * Do early initialization using the flattened device
	 * tree, such as retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 */
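	/*
	 * __va() works here even though translation is still off: in
	 * real mode the CPU ignores the top address bits (see the
	 * comment above early_setup()), so the linear-map address of
	 * the flattened device tree resolves to the right physical one.
	 */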
	early_init_devtree(__va(dt_ptr));

	/* Now we know the logical id of our boot cpu, setup the paca. */
	if (boot_cpuid != 0) {
		/* Poison paca_ptrs[0] again if it's not the boot cpu */
		memset(&paca_ptrs[0], 0x88, sizeof(paca_ptrs[0]));
	}
	setup_paca(paca_ptrs[boot_cpuid]);
	fixup_boot_paca();

	/*
	 * Configure exception handlers. This includes setting up trampolines
	 * if needed, setting exception endian mode, etc...
	 */
	configure_exceptions();

	/*
	 * Configure Kernel Userspace Protection. This needs to happen before
	 * feature fixups for platforms that implement this using features.
	 */
	setup_kup();

	/* Apply all the dynamic patching */
	apply_feature_fixups();
	setup_feature_keys();

	early_ioremap_setup();

	/* Initialize the hash table or TLB handling */
	early_init_mmu();

	/*
	 * After firmware and early platform setup code has set things up,
	 * we note the SPR values for configurable control/performance
	 * registers, and use those as initial defaults.
	 */
	record_spr_defaults();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set and enable AIL if it exists
	 */
	cpu_ready_for_interrupts();

	/*
	 * We enable ftrace here, but since we only support DYNAMIC_FTRACE, it
	 * will only actually get enabled on the boot cpu much later once
	 * ftrace itself has been initialized.
	 */
	this_cpu_enable_ftrace();

	udbg_printf(" <- %s()\n", __func__);

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
	/*
	 * This needs to be done *last* (after the above udbg_printf() even)
	 *
	 * Right after we return from this function, we turn on the MMU
	 * which means the real-mode access trick that btext does will
	 * no longer work, it needs to switch to using a real MMU
	 * mapping. This call will ensure that it does.
	 */
	btext_map();
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}

#ifdef CONFIG_SMP
void early_setup_secondary(void)
{
	/* Mark interrupts disabled in PACA */
	irq_soft_mask_set(IRQS_DISABLED);

	/* Initialize the hash table or TLB handling */
	early_init_mmu_secondary();

	/* Perform any KUP setup that is per-cpu */
	setup_kup();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set.
	 */
	cpu_ready_for_interrupts();
}

#endif /* CONFIG_SMP */

void panic_smp_self_stop(void)
{
	hard_irq_disable();
	spin_begin();
	while (1)
		spin_cpu_relax();
}

#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
static bool use_spinloop(void)
{
	if (IS_ENABLED(CONFIG_PPC_BOOK3S)) {
		/*
		 * See comments in head_64.S -- not all platforms insert
		 * secondaries at __secondary_hold and wait at the spin
		 * loop.
		 */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			return false;
		return true;
	}

	/*
	 * When book3e boots from kexec, the ePAPR spin table does
	 * not get used.
	 */
	return of_property_read_bool(of_chosen, "linux,booted-from-kexec");
}

void smp_release_cpus(void)
{
	unsigned long *ptr;
	int i;

	if (!use_spinloop())
		return;

	/*
	 * All secondary cpus are spinning on a common spinloop, release them
	 * all now so they can start to spin on their individual paca
	 * spinloops. For non SMP kernels, the secondary cpus never get out
	 * of the common spinloop.
	 */
	ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
			- PHYSICAL_START);
	*ptr = ppc_function_entry(generic_secondary_smp_init);

	/* And wait a bit for them to catch up */
	for (i = 0; i < 100000; i++) {
		mb();
		HMT_low();
		if (spinning_secondaries == 0)
			break;
		udelay(1);
	}
	pr_debug("spinning_secondaries = %d\n", spinning_secondaries);
}
#endif /* CONFIG_SMP || CONFIG_KEXEC_CORE */

/*
 * Initialize some remaining members of the ppc64_caches and systemcfg
 * structures (at least until we get rid of them completely). This is
 * mostly some cache information about the CPU that will be used by
 * cache flush routines and/or provided to userland.
 */
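
/*
 * Associativity is derived as size / (sets * line_size); for example
 * the hard-coded POWER8 L1d below (64kB, 128-byte lines, 64 sets)
 * works out to 8-way. sets == 0 denotes a fully associative cache and
 * yields the 0xffff sentinel.
 */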
static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
			    u32 bsize, u32 sets)
{
	info->size = size;
	info->sets = sets;
	info->line_size = lsize;
	info->block_size = bsize;
	info->log_block_size = __ilog2(bsize);
	if (bsize)
		info->blocks_per_page = PAGE_SIZE / bsize;
	else
		info->blocks_per_page = 0;

	if (sets == 0)
		info->assoc = 0xffff;
	else
		info->assoc = size / (sets * lsize);
}

static bool __init parse_cache_info(struct device_node *np,
				    bool icache,
				    struct ppc_cache_info *info)
{
	static const char *ipropnames[] __initdata = {
		"i-cache-size",
		"i-cache-sets",
		"i-cache-block-size",
		"i-cache-line-size",
	};
	static const char *dpropnames[] __initdata = {
		"d-cache-size",
		"d-cache-sets",
		"d-cache-block-size",
		"d-cache-line-size",
	};
	const char **propnames = icache ? ipropnames : dpropnames;
	const __be32 *sizep, *lsizep, *bsizep, *setsp;
	u32 size, lsize, bsize, sets;
	bool success = true;

	size = 0;
	sets = -1u;
	lsize = bsize = cur_cpu_spec->dcache_bsize;
	sizep = of_get_property(np, propnames[0], NULL);
	if (sizep != NULL)
		size = be32_to_cpu(*sizep);
	setsp = of_get_property(np, propnames[1], NULL);
	if (setsp != NULL)
		sets = be32_to_cpu(*setsp);
	bsizep = of_get_property(np, propnames[2], NULL);
	lsizep = of_get_property(np, propnames[3], NULL);
	if (bsizep == NULL)
		bsizep = lsizep;
	if (lsizep != NULL)
		lsize = be32_to_cpu(*lsizep);
	if (bsizep != NULL)
		bsize = be32_to_cpu(*bsizep);
	if (sizep == NULL || bsizep == NULL || lsizep == NULL)
		success = false;

	/*
	 * OF is weird .. it represents fully associative caches
	 * as "1 way" which doesn't make much sense and doesn't
	 * leave room for direct mapped. We'll assume that 0
	 * in OF means direct mapped for that reason.
	 */
	if (sets == 1)
		sets = 0;
	else if (sets == 0)
		sets = 1;

	init_cache_info(info, size, lsize, bsize, sets);

	return success;
}

void __init initialize_cache_info(void)
{
	struct device_node *cpu = NULL, *l2, *l3 = NULL;
	u32 pvr;

	/*
	 * All shipping POWER8 machines have a firmware bug that
	 * puts incorrect information in the device-tree. This will
	 * be (hopefully) fixed for future chips but for now hard
	 * code the values if we are running on one of these
	 */
	pvr = PVR_VER(mfspr(SPRN_PVR));
	if (pvr == PVR_POWER8 || pvr == PVR_POWER8E ||
	    pvr == PVR_POWER8NVL) {
						/* size    lsize blk  sets */
		init_cache_info(&ppc64_caches.l1i, 0x8000,   128, 128, 32);
		init_cache_info(&ppc64_caches.l1d, 0x10000,  128, 128, 64);
		init_cache_info(&ppc64_caches.l2,  0x80000,  128, 0,   512);
		init_cache_info(&ppc64_caches.l3,  0x800000, 128, 0,   8192);
	} else
		cpu = of_find_node_by_type(NULL, "cpu");

	/*
	 * We're assuming *all* of the CPUs have the same
	 * d-cache and i-cache sizes... -Peter
	 */
	if (cpu) {
		if (!parse_cache_info(cpu, false, &ppc64_caches.l1d))
			pr_warn("Argh, can't find dcache properties !\n");

		if (!parse_cache_info(cpu, true, &ppc64_caches.l1i))
			pr_warn("Argh, can't find icache properties !\n");

		/*
		 * Try to find the L2 and L3 if any. Assume they are
		 * unified and use the D-side properties.
		 */
		l2 = of_find_next_cache_node(cpu);
		of_node_put(cpu);
		if (l2) {
			parse_cache_info(l2, false, &ppc64_caches.l2);
			l3 = of_find_next_cache_node(l2);
			of_node_put(l2);
		}
		if (l3) {
			parse_cache_info(l3, false, &ppc64_caches.l3);
			of_node_put(l3);
		}
	}

	/* For use by binfmt_elf */
	dcache_bsize = ppc64_caches.l1d.block_size;
	icache_bsize = ppc64_caches.l1i.block_size;

	cur_cpu_spec->dcache_bsize = dcache_bsize;
	cur_cpu_spec->icache_bsize = icache_bsize;
}

/*
 * This returns the limit below which memory accesses to the linear
 * mapping are guaranteed not to cause an architectural exception (e.g.,
 * TLB or SLB miss fault).
 *
 * This is used to allocate PACAs and various interrupt stacks that
 * are accessed early in interrupt handlers that must not cause
 * re-entrant interrupts.
 */
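/*
 * For example, with the hash MMU the bolted region is the first
 * segment: 1UL << SID_SHIFT (256MB) with 256MB segments, or
 * 1UL << SID_SHIFT_1T (1TB) when 1T segments are available.
 */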
__init u64 ppc64_bolted_size(void)
{
#ifdef CONFIG_PPC_BOOK3E
	/* Freescale BookE bolts the entire linear mapping */
	/* XXX: BookE ppc64_rma_limit setup seems to disagree? */
	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		return linear_map_top;
	/* Other BookE, we assume the first GB is bolted */
	return 1ul << 30;
#else
	/* BookS radix, does not take faults on linear mapping */
	if (early_radix_enabled())
		return ULONG_MAX;

	/* BookS hash, the first segment is bolted */
	if (early_mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return 1UL << SID_SHIFT_1T;
	return 1UL << SID_SHIFT;
#endif
}

static void *__init alloc_stack(unsigned long limit, int cpu)
{
	void *ptr;

	BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16);

	ptr = memblock_alloc_try_nid(THREAD_SIZE, THREAD_ALIGN,
				     MEMBLOCK_LOW_LIMIT, limit,
				     early_cpu_to_node(cpu));
	if (!ptr)
		panic("cannot allocate stacks");

	return ptr;
}

void __init irqstack_early_init(void)
{
	u64 limit = ppc64_bolted_size();
	unsigned int i;

	/*
	 * Interrupt stacks must be in the first segment since we
	 * cannot afford to take SLB misses on them. They are not
	 * accessed in realmode.
	 */
	for_each_possible_cpu(i) {
		softirq_ctx[i] = alloc_stack(limit, i);
		hardirq_ctx[i] = alloc_stack(limit, i);
	}
}

#ifdef CONFIG_PPC_BOOK3E
void __init exc_lvl_early_init(void)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		void *sp;

		sp = alloc_stack(ULONG_MAX, i);
		critirq_ctx[i] = sp;
		paca_ptrs[i]->crit_kstack = sp + THREAD_SIZE;

		sp = alloc_stack(ULONG_MAX, i);
		dbgirq_ctx[i] = sp;
		paca_ptrs[i]->dbg_kstack = sp + THREAD_SIZE;

		sp = alloc_stack(ULONG_MAX, i);
		mcheckirq_ctx[i] = sp;
		paca_ptrs[i]->mc_kstack = sp + THREAD_SIZE;
	}

	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
		patch_exception(0x040, exc_debug_debug_book3e);
}
#endif

/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled. Exclusive emergency
 * stack for machine checks.
 */
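/*
 * alloc_stack() returns the base of a THREAD_SIZE allocation; the PACA
 * stack pointers set below (and in exc_lvl_early_init() above) point
 * at base + THREAD_SIZE because the stack grows down from the top.
 */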
void __init emergency_stack_init(void)
{
	u64 limit;
	unsigned int i;

	/*
	 * Emergency stacks must be under 256MB; we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, machine check, system reset, and HMI, we need to get
	 * at them in real mode. This means they must also be within the RMO
	 * region.
	 *
	 * The IRQ stacks allocated elsewhere in this file are zeroed and
	 * initialized in kernel/irq.c. These are initialized here in order
	 * to have emergency stacks available as early as possible.
	 */
	limit = min(ppc64_bolted_size(), ppc64_rma_size);

	for_each_possible_cpu(i) {
		paca_ptrs[i]->emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;

#ifdef CONFIG_PPC_BOOK3S_64
		/* emergency stack for NMI exception handling. */
		paca_ptrs[i]->nmi_emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;

		/* emergency stack for machine check exception handling. */
		paca_ptrs[i]->mc_emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;
#endif
	}
}

#ifdef CONFIG_SMP
#define PCPU_DYN_SIZE		()

static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS),
				      MEMBLOCK_ALLOC_ACCESSIBLE,
				      early_cpu_to_node(cpu));
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	memblock_free(__pa(ptr), size);
}

static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

void __init setup_per_cpu_areas(void)
{
	const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
	size_t atom_size;
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
	 * to group units. For larger mappings, use 1M atom which
	 * should be large enough to contain a number of units.
	 */
	if (mmu_linear_psize == MMU_PAGE_4K)
		atom_size = PAGE_SIZE;
	else
		atom_size = 1 << 20;

	rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
				    pcpu_fc_alloc, pcpu_fc_free);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
		paca_ptrs[cpu]->data_offset = __per_cpu_offset[cpu];
	}
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
unsigned long memory_block_size_bytes(void)
{
	if (ppc_md.memory_block_size)
		return ppc_md.memory_block_size();

	return MIN_MEMORY_BLOCK_SIZE;
}
#endif

#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif

#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
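/*
 * The perf-based hardlockup detector arms a CPU-cycles event; the
 * sample period is watchdog_thresh seconds' worth of cycles, i.e.
 * ppc_proc_freq (in Hz) multiplied by the threshold in seconds.
 */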
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
	return ppc_proc_freq * watchdog_thresh;
}
#endif

/*
 * The perf based hardlockup detector breaks PMU event based branches, so
 * disable it by default. Book3S has a soft-nmi hardlockup detector based
 * on the decrementer interrupt, so it does not suffer from this problem.
 *
 * It is likely to get false positives in VM guests, so disable it there
 * by default too.
 */
static int __init disable_hardlockup_detector(void)
{
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
	hardlockup_detector_disable();
#else
	if (firmware_has_feature(FW_FEATURE_LPAR))
		hardlockup_detector_disable();
#endif

	return 0;
}
early_initcall(disable_hardlockup_detector);

#ifdef CONFIG_PPC_BOOK3S_64
static enum l1d_flush_type enabled_flush_types;
static void *l1d_flush_fallback_area;
static bool no_rfi_flush;
bool rfi_flush;

static int __init handle_no_rfi_flush(char *p)
{
	pr_info("rfi-flush: disabled on command line.\n");
	no_rfi_flush = true;
	return 0;
}
early_param("no_rfi_flush", handle_no_rfi_flush);

/*
 * The RFI flush is not KPTI, but because users will see doco that says to use
 * nopti we hijack that option here to also disable the RFI flush.
 */
static int __init handle_no_pti(char *p)
{
	pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
	handle_no_rfi_flush(NULL);
	return 0;
}
early_param("nopti", handle_no_pti);

static void do_nothing(void *unused)
{
	/*
	 * We don't need to do the flush explicitly; just enter+exit kernel is
	 * sufficient, the RFI exit handlers will do the right thing.
	 */
}

void rfi_flush_enable(bool enable)
{
	if (enable) {
		do_rfi_flush_fixups(enabled_flush_types);
		on_each_cpu(do_nothing, NULL, 1);
	} else
		do_rfi_flush_fixups(L1D_FLUSH_NONE);

	rfi_flush = enable;
}
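
/*
 * Allocate the displacement flush fallback area. This is __ref rather
 * than __init because it calls the boot-time memblock allocator yet
 * must stay callable from setup_rfi_flush(), which is not __init; the
 * early return below keeps the allocation itself boot-time only.
 */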
static void __ref init_fallback_flush(void)
{
	u64 l1d_size, limit;
	int cpu;

	/* Only allocate the fallback flush area once (at boot time). */
	if (l1d_flush_fallback_area)
		return;

	l1d_size = ppc64_caches.l1d.size;

	/*
	 * If there is no d-cache-size property in the device tree, l1d_size
	 * could be zero. That leads to the loop in the asm wrapping around to
	 * 2^64-1, and then walking off the end of the fallback area and
	 * eventually causing a page fault which is fatal. Just default to
	 * something vaguely sane.
	 */
	if (!l1d_size)
		l1d_size = (64 * 1024);

	limit = min(ppc64_bolted_size(), ppc64_rma_size);

	/*
	 * Align to L1d size, and size it at 2x L1d size, to catch possible
	 * hardware prefetch runoff. We don't have a recipe for load patterns
	 * to reliably avoid the prefetcher.
	 */
	l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2,
						l1d_size, MEMBLOCK_LOW_LIMIT,
						limit, NUMA_NO_NODE);
	if (!l1d_flush_fallback_area)
		panic("%s: Failed to allocate %llu bytes align=0x%llx max_addr=%pa\n",
		      __func__, l1d_size * 2, l1d_size, &limit);

	for_each_possible_cpu(cpu) {
		struct paca_struct *paca = paca_ptrs[cpu];
		paca->rfi_flush_fallback_area = l1d_flush_fallback_area;
		paca->l1d_flush_size = l1d_size;
	}
}

void setup_rfi_flush(enum l1d_flush_type types, bool enable)
{
	if (types & L1D_FLUSH_FALLBACK) {
		pr_info("rfi-flush: fallback displacement flush available\n");
		init_fallback_flush();
	}

	if (types & L1D_FLUSH_ORI)
		pr_info("rfi-flush: ori type flush available\n");

	if (types & L1D_FLUSH_MTTRIG)
		pr_info("rfi-flush: mttrig type flush available\n");

	enabled_flush_types = types;

	if (!no_rfi_flush && !cpu_mitigations_off())
		rfi_flush_enable(enable);
}

#ifdef CONFIG_DEBUG_FS
static int rfi_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != rfi_flush)
		rfi_flush_enable(enable);

	return 0;
}

static int rfi_flush_get(void *data, u64 *val)
{
	*val = rfi_flush ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");

static __init int rfi_flush_debugfs_init(void)
{
	debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
	return 0;
}
device_initcall(rfi_flush_debugfs_init);
#endif
#endif /* CONFIG_PPC_BOOK3S_64 */