xref: /linux/arch/powerpc/kernel/setup_64.c (revision c4c14c3bd177ea769fee938674f73a8ec0cdd47a)
1 /*
2  *
3  * Common boot and setup code.
4  *
5  * Copyright (C) 2001 PPC64 Team, IBM Corp
6  *
7  *      This program is free software; you can redistribute it and/or
8  *      modify it under the terms of the GNU General Public License
9  *      as published by the Free Software Foundation; either version
10  *      2 of the License, or (at your option) any later version.
11  */
12 
13 #include <linux/export.h>
14 #include <linux/string.h>
15 #include <linux/sched.h>
16 #include <linux/init.h>
17 #include <linux/kernel.h>
18 #include <linux/reboot.h>
19 #include <linux/delay.h>
20 #include <linux/initrd.h>
21 #include <linux/seq_file.h>
22 #include <linux/ioport.h>
23 #include <linux/console.h>
24 #include <linux/utsname.h>
25 #include <linux/tty.h>
26 #include <linux/root_dev.h>
27 #include <linux/notifier.h>
28 #include <linux/cpu.h>
29 #include <linux/unistd.h>
30 #include <linux/serial.h>
31 #include <linux/serial_8250.h>
32 #include <linux/bootmem.h>
33 #include <linux/pci.h>
34 #include <linux/lockdep.h>
35 #include <linux/memblock.h>
36 #include <linux/memory.h>
37 #include <linux/nmi.h>
38 
39 #include <asm/debugfs.h>
40 #include <asm/io.h>
41 #include <asm/kdump.h>
42 #include <asm/prom.h>
43 #include <asm/processor.h>
44 #include <asm/pgtable.h>
45 #include <asm/smp.h>
46 #include <asm/elf.h>
47 #include <asm/machdep.h>
48 #include <asm/paca.h>
49 #include <asm/time.h>
50 #include <asm/cputable.h>
51 #include <asm/dt_cpu_ftrs.h>
52 #include <asm/sections.h>
53 #include <asm/btext.h>
54 #include <asm/nvram.h>
55 #include <asm/setup.h>
56 #include <asm/rtas.h>
57 #include <asm/iommu.h>
58 #include <asm/serial.h>
59 #include <asm/cache.h>
60 #include <asm/page.h>
61 #include <asm/mmu.h>
62 #include <asm/firmware.h>
63 #include <asm/xmon.h>
64 #include <asm/udbg.h>
65 #include <asm/kexec.h>
66 #include <asm/code-patching.h>
67 #include <asm/livepatch.h>
68 #include <asm/opal.h>
69 #include <asm/cputhreads.h>
70 #include <asm/hw_irq.h>
71 #include <asm/feature-fixups.h>
72 
73 #include "setup.h"
74 
75 #ifdef DEBUG
76 #define DBG(fmt...) udbg_printf(fmt)
77 #else
78 #define DBG(fmt...)
79 #endif
80 
81 int spinning_secondaries;
82 u64 ppc64_pft_size;
83 
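/*
 * Conservative boot-time defaults: assume 64-byte cache blocks until
 * initialize_cache_info() below reads the real geometry from the
 * device tree.
 */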
84 struct ppc64_caches ppc64_caches = {
85 	.l1d = {
86 		.block_size = 0x40,
87 		.log_block_size = 6,
88 	},
89 	.l1i = {
90 		.block_size = 0x40,
91 		.log_block_size = 6
92 	},
93 };
94 EXPORT_SYMBOL_GPL(ppc64_caches);
95 
96 #if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
97 void __init setup_tlb_core_data(void)
98 {
99 	int cpu;
100 
101 	BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);
102 
103 	for_each_possible_cpu(cpu) {
104 		int first = cpu_first_thread_sibling(cpu);
105 
106 		/*
107 		 * If we boot via kdump on a non-primary thread,
108 		 * make sure we point at the thread that actually
109 		 * set up this TLB.
110 		 */
111 		if (cpu_first_thread_sibling(boot_cpuid) == first)
112 			first = boot_cpuid;
113 
114 		paca_ptrs[cpu]->tcd_ptr = &paca_ptrs[first]->tcd;
115 
116 		/*
117 		 * If we have threads, we need either tlbsrx.
118 		 * or e6500 tablewalk mode, or else TLB handlers
119 		 * will be racy and could produce duplicate entries.
120 		 * Should we panic instead?
121 		 */
122 		WARN_ONCE(smt_enabled_at_boot >= 2 &&
123 			  !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
124 			  book3e_htw_mode != PPC_HTW_E6500,
125 			  "%s: unsupported MMU configuration\n", __func__);
126 	}
127 }
128 #endif
129 
130 #ifdef CONFIG_SMP
131 
132 static char *smt_enabled_cmdline;
133 
134 /* Look for ibm,smt-enabled OF option */
135 void __init check_smt_enabled(void)
136 {
137 	struct device_node *dn;
138 	const char *smt_option;
139 
140 	/* Default to enabling all threads */
141 	smt_enabled_at_boot = threads_per_core;
142 
143 	/* Allow the command line to overrule the OF option */
144 	if (smt_enabled_cmdline) {
145 		if (!strcmp(smt_enabled_cmdline, "on"))
146 			smt_enabled_at_boot = threads_per_core;
147 		else if (!strcmp(smt_enabled_cmdline, "off"))
148 			smt_enabled_at_boot = 0;
149 		else {
150 			int smt;
151 			int rc;
152 
153 			rc = kstrtoint(smt_enabled_cmdline, 10, &smt);
154 			if (!rc)
155 				smt_enabled_at_boot =
156 					min(threads_per_core, smt);
157 		}
158 	} else {
159 		dn = of_find_node_by_path("/options");
160 		if (dn) {
161 			smt_option = of_get_property(dn, "ibm,smt-enabled",
162 						     NULL);
163 
164 			if (smt_option) {
165 				if (!strcmp(smt_option, "on"))
166 					smt_enabled_at_boot = threads_per_core;
167 				else if (!strcmp(smt_option, "off"))
168 					smt_enabled_at_boot = 0;
169 			}
170 
171 			of_node_put(dn);
172 		}
173 	}
174 }
175 
176 /* Look for the smt-enabled= cmdline option (on, off, or a thread count) */
177 static int __init early_smt_enabled(char *p)
178 {
179 	smt_enabled_cmdline = p;
180 	return 0;
181 }
182 early_param("smt-enabled", early_smt_enabled);
183 
184 #endif /* CONFIG_SMP */
185 
186 /* Fix up paca fields required for the boot cpu */
187 static void __init fixup_boot_paca(void)
188 {
189 	/* The boot cpu is started */
190 	get_paca()->cpu_start = 1;
191 	/* Allow percpu accesses to work until we set up percpu data */
192 	get_paca()->data_offset = 0;
193 	/* Mark interrupts disabled in PACA */
194 	irq_soft_mask_set(IRQS_DISABLED);
195 }
196 
197 static void __init configure_exceptions(void)
198 {
199 	/*
200 	 * Set up the trampolines from the lowmem exception vectors
201 	 * to the kdump kernel when not using a relocatable kernel.
202 	 */
203 	setup_kdump_trampoline();
204 
205 	/* Under a PAPR hypervisor, we need hypercalls */
206 	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
207 		/* Enable AIL if possible */
208 		pseries_enable_reloc_on_exc();
209 
210 		/*
211 		 * Tell the hypervisor that we want our exceptions to
212 		 * be taken in little endian mode.
213 		 *
214 		 * We don't call this for big endian as our calling convention
215 		 * makes us always enter in BE, and the call may fail under
216 		 * some circumstances with kdump.
217 		 */
218 #ifdef __LITTLE_ENDIAN__
219 		pseries_little_endian_exceptions();
220 #endif
221 	} else {
222 		/* Set endian mode using OPAL */
223 		if (firmware_has_feature(FW_FEATURE_OPAL))
224 			opal_configure_cores();
225 
226 		/* AIL on native is done in cpu_ready_for_interrupts() */
227 	}
228 }
229 
230 static void cpu_ready_for_interrupts(void)
231 {
232 	/*
233 	 * Enable AIL if supported and we are in hypervisor mode. This
234 	 * is called once for every processor.
235 	 *
236 	 * If we are not in hypervisor mode the job is done once for
237 	 * the whole partition in configure_exceptions().
238 	 */
239 	if (cpu_has_feature(CPU_FTR_HVMODE) &&
240 	    cpu_has_feature(CPU_FTR_ARCH_207S)) {
241 		unsigned long lpcr = mfspr(SPRN_LPCR);
242 		mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
243 	}
244 
245 	/*
246 	 * Set HFSCR:TM based on CPU features:
247 	 * In the special case of TM no suspend (P9N DD2.1), Linux is
248 	 * told TM is off via the dt-ftrs but told to (partially) use
249 	 * it via OPAL_REINIT_CPUS_TM_SUSPEND_DISABLED. So HFSCR[TM]
250 	 * will be off from dt-ftrs but we need to turn it on for the
251 	 * no suspend case.
252 	 */
253 	if (cpu_has_feature(CPU_FTR_HVMODE)) {
254 		if (cpu_has_feature(CPU_FTR_TM_COMP))
255 			mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) | HFSCR_TM);
256 		else
257 			mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
258 	}
259 
260 	/* Set IR and DR in PACA MSR */
261 	get_paca()->kernel_msr = MSR_KERNEL;
262 }
263 
264 unsigned long spr_default_dscr = 0;
265 
266 void __init record_spr_defaults(void)
267 {
268 	if (early_cpu_has_feature(CPU_FTR_DSCR))
269 		spr_default_dscr = mfspr(SPRN_DSCR);
270 }
271 
272 /*
273  * Early initialization entry point. This is called by head.S
274  * with MMU translation disabled. We rely on the "feature" of
275  * the CPU that ignores the top 2 bits of the address in real
276  * mode so we can access kernel globals normally provided we
277  * only toy with things in the RMO region. From here, we do
278  * some early parsing of the device-tree to set up our MEMBLOCK
279  * data structures, and allocate & initialize the hash table
280  * and segment tables so we can start running with translation
281  * enabled.
282  *
283  * It is this function which will call the probe() callback of
284  * the various platform types and copy the matching one to the
285  * global ppc_md structure. Your platform can eventually do
286  * some very early initializations from the probe() routine, but
287  * this is not recommended, be very careful as, for example, the
288  * device-tree is not accessible via normal means at this point.
289  */
290 
291 void __init early_setup(unsigned long dt_ptr)
292 {
293 	static __initdata struct paca_struct boot_paca;
294 
295 	/* -------- printk is _NOT_ safe to use here! ------- */
296 
297 	/* Try new device tree based feature discovery ... */
298 	if (!dt_cpu_ftrs_init(__va(dt_ptr)))
299 		/* Otherwise use the old style CPU table */
300 		identify_cpu(0, mfspr(SPRN_PVR));
301 
302 	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
303 	initialise_paca(&boot_paca, 0);
304 	setup_paca(&boot_paca);
305 	fixup_boot_paca();
306 
307 	/* -------- printk is now safe to use ------- */
308 
309 	/* Enable early debugging if any is specified (see udbg.h) */
310 	udbg_early_init();
311 
312 	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);
313 
314 	/*
315 	 * Do early initialization using the flattened device
316 	 * tree, such as retrieving the physical memory map or
317 	 * calculating/retrieving the hash table size.
318 	 */
319 	early_init_devtree(__va(dt_ptr));
320 
321 	/* Now that we know the logical id of our boot cpu, set up its paca. */
322 	if (boot_cpuid != 0) {
323 		/* Poison paca_ptrs[0] again if it's not the boot cpu */
324 		memset(&paca_ptrs[0], 0x88, sizeof(paca_ptrs[0]));
325 	}
326 	setup_paca(paca_ptrs[boot_cpuid]);
327 	fixup_boot_paca();
328 
329 	/*
330 	 * Configure exception handlers. This includes setting up trampolines
331 	 * if needed, setting exception endian mode, etc...
332 	 */
333 	configure_exceptions();
334 
335 	/* Apply all the dynamic patching */
336 	apply_feature_fixups();
337 	setup_feature_keys();
338 
339 	/* Initialize the hash table or TLB handling */
340 	early_init_mmu();
341 
342 	/*
343 	 * After firmware and early platform setup code have set things up,
344 	 * we note the SPR values for configurable control/performance
345 	 * registers, and use those as initial defaults.
346 	 */
347 	record_spr_defaults();
348 
349 	/*
350 	 * At this point, we can let interrupts switch to virtual mode
351 	 * (the MMU has been set up), so adjust the MSR in the PACA to
352 	 * have IR and DR set and enable AIL if it exists
353 	 */
354 	cpu_ready_for_interrupts();
355 
356 	/*
357 	 * We enable ftrace here, but since we only support DYNAMIC_FTRACE, it
358 	 * will only actually get enabled on the boot cpu much later once
359 	 * ftrace itself has been initialized.
360 	 */
361 	this_cpu_enable_ftrace();
362 
363 	DBG(" <- early_setup()\n");
364 
365 #ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
366 	/*
367 	 * This needs to be done *last* (after the above DBG() even).
368 	 *
369 	 * Right after we return from this function, we turn on the MMU,
370 	 * which means the real-mode access trick that btext does will
371 	 * no longer work; it needs to switch to using a real MMU
372 	 * mapping. This call ensures that it does.
373 	 */
374 	btext_map();
375 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
376 }
377 
378 #ifdef CONFIG_SMP
379 void early_setup_secondary(void)
380 {
381 	/* Mark interrupts disabled in PACA */
382 	irq_soft_mask_set(IRQS_DISABLED);
383 
384 	/* Initialize the hash table or TLB handling */
385 	early_init_mmu_secondary();
386 
387 	/*
388 	 * At this point, we can let interrupts switch to virtual mode
389 	 * (the MMU has been set up), so adjust the MSR in the PACA to
390 	 * have IR and DR set.
391 	 */
392 	cpu_ready_for_interrupts();
393 }
394 
395 #endif /* CONFIG_SMP */
396 
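/*
 * Parks a CPU that enters panic() while another CPU is already handling
 * one. spin_begin() drops the SMT thread priority so the spinning
 * thread does not starve its hardware siblings.
 */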
397 void panic_smp_self_stop(void)
398 {
399 	hard_irq_disable();
400 	spin_begin();
401 	while (1)
402 		spin_cpu_relax();
403 }
404 
405 #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
406 static bool use_spinloop(void)
407 {
408 	if (IS_ENABLED(CONFIG_PPC_BOOK3S)) {
409 		/*
410 		 * See comments in head_64.S -- not all platforms insert
411 		 * secondaries at __secondary_hold and wait at the spin
412 		 * loop.
413 		 */
414 		if (firmware_has_feature(FW_FEATURE_OPAL))
415 			return false;
416 		return true;
417 	}
418 
419 	/*
420 	 * When book3e boots from kexec, the ePAPR spin table does
421 	 * not get used.
422 	 */
423 	return of_property_read_bool(of_chosen, "linux,booted-from-kexec");
424 }
425 
426 void smp_release_cpus(void)
427 {
428 	unsigned long *ptr;
429 	int i;
430 
431 	if (!use_spinloop())
432 		return;
433 
434 	DBG(" -> smp_release_cpus()\n");
435 
436 	/* All secondary cpus are spinning on a common spinloop, release them
437 	 * all now so they can start to spin on their individual paca
438 	 * spinloops. For non-SMP kernels, the secondary cpus never get out
439 	 * of the common spinloop.
440 	 */
441 
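	/*
	 * Note the secondaries are still executing in real mode out of the
	 * image copy at physical address zero, so when the kernel itself
	 * runs relocated (e.g. kdump) we must subtract PHYSICAL_START to
	 * reach the spinloop word they are actually polling.
	 */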
442 	ptr  = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
443 			- PHYSICAL_START);
444 	*ptr = ppc_function_entry(generic_secondary_smp_init);
445 
446 	/* And wait a bit for them to catch up */
447 	for (i = 0; i < 100000; i++) {
448 		mb();
449 		HMT_low();
450 		if (spinning_secondaries == 0)
451 			break;
452 		udelay(1);
453 	}
454 	DBG("spinning_secondaries = %d\n", spinning_secondaries);
455 
456 	DBG(" <- smp_release_cpus()\n");
457 }
458 #endif /* CONFIG_SMP || CONFIG_KEXEC_CORE */
459 
460 /*
461  * Initialize some remaining members of the ppc64_caches and systemcfg
462  * structures (at least until we get rid of them completely). This is
463  * mostly cache information about the CPU that will be used by cache
464  * flush routines and/or provided to userland.
466  */
467 
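/*
 * Fill in one ppc_cache_info. A sets value of 0 means fully associative
 * here (see the OF quirk handling in parse_cache_info() below), so the
 * associativity is set to the 0xffff sentinel instead of being computed
 * as size / (sets * lsize).
 */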
468 static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
469 			    u32 bsize, u32 sets)
470 {
471 	info->size = size;
472 	info->sets = sets;
473 	info->line_size = lsize;
474 	info->block_size = bsize;
475 	info->log_block_size = __ilog2(bsize);
476 	if (bsize)
477 		info->blocks_per_page = PAGE_SIZE / bsize;
478 	else
479 		info->blocks_per_page = 0;
480 
481 	if (sets == 0)
482 		info->assoc = 0xffff;
483 	else
484 		info->assoc = size / (sets * lsize);
485 }
486 
487 static bool __init parse_cache_info(struct device_node *np,
488 				    bool icache,
489 				    struct ppc_cache_info *info)
490 {
491 	static const char *ipropnames[] __initdata = {
492 		"i-cache-size",
493 		"i-cache-sets",
494 		"i-cache-block-size",
495 		"i-cache-line-size",
496 	};
497 	static const char *dpropnames[] __initdata = {
498 		"d-cache-size",
499 		"d-cache-sets",
500 		"d-cache-block-size",
501 		"d-cache-line-size",
502 	};
503 	const char **propnames = icache ? ipropnames : dpropnames;
504 	const __be32 *sizep, *lsizep, *bsizep, *setsp;
505 	u32 size, lsize, bsize, sets;
506 	bool success = true;
507 
508 	size = 0;
509 	sets = -1u;
510 	lsize = bsize = cur_cpu_spec->dcache_bsize;
511 	sizep = of_get_property(np, propnames[0], NULL);
512 	if (sizep != NULL)
513 		size = be32_to_cpu(*sizep);
514 	setsp = of_get_property(np, propnames[1], NULL);
515 	if (setsp != NULL)
516 		sets = be32_to_cpu(*setsp);
517 	bsizep = of_get_property(np, propnames[2], NULL);
518 	lsizep = of_get_property(np, propnames[3], NULL);
519 	if (bsizep == NULL)
520 		bsizep = lsizep;
521 	if (lsizep != NULL)
522 		lsize = be32_to_cpu(*lsizep);
523 	if (bsizep != NULL)
524 		bsize = be32_to_cpu(*bsizep);
525 	if (sizep == NULL || bsizep == NULL || lsizep == NULL)
526 		success = false;
527 
528 	/*
529 	 * OF is weird: it represents fully associative caches
530 	 * as "1 way", which doesn't make much sense and doesn't
531 	 * leave room for direct-mapped. We'll assume that 0
532 	 * in OF means direct-mapped for that reason.
533 	 */
534 	if (sets == 1)
535 		sets = 0;
536 	else if (sets == 0)
537 		sets = 1;
538 
539 	init_cache_info(info, size, lsize, bsize, sets);
540 
541 	return success;
542 }
543 
544 void __init initialize_cache_info(void)
545 {
546 	struct device_node *cpu = NULL, *l2, *l3 = NULL;
547 	u32 pvr;
548 
549 	DBG(" -> initialize_cache_info()\n");
550 
551 	/*
552 	 * All shipping POWER8 machines have a firmware bug that
553 	 * puts incorrect information in the device-tree. This will
554 	 * be (hopefully) fixed for future chips, but for now we
555 	 * hard-code the values if we are running on one of these.
556 	 */
557 	pvr = PVR_VER(mfspr(SPRN_PVR));
558 	if (pvr == PVR_POWER8 || pvr == PVR_POWER8E ||
559 	    pvr == PVR_POWER8NVL) {
560 						/* size    lsize   blk  sets */
561 		init_cache_info(&ppc64_caches.l1i, 0x8000,   128,  128, 32);
562 		init_cache_info(&ppc64_caches.l1d, 0x10000,  128,  128, 64);
563 		init_cache_info(&ppc64_caches.l2,  0x80000,  128,  0,   512);
564 		init_cache_info(&ppc64_caches.l3,  0x800000, 128,  0,   8192);
565 	} else
566 		cpu = of_find_node_by_type(NULL, "cpu");
567 
568 	/*
569 	 * We're assuming *all* of the CPUs have the same
570 	 * d-cache and i-cache sizes... -Peter
571 	 */
572 	if (cpu) {
573 		if (!parse_cache_info(cpu, false, &ppc64_caches.l1d))
574 			DBG("Argh, can't find dcache properties!\n");
575 
576 		if (!parse_cache_info(cpu, true, &ppc64_caches.l1i))
577 			DBG("Argh, can't find icache properties!\n");
578 
579 		/*
580 		 * Try to find the L2 and L3 if any. Assume they are
581 		 * unified and use the D-side properties.
582 		 */
583 		l2 = of_find_next_cache_node(cpu);
584 		of_node_put(cpu);
585 		if (l2) {
586 			parse_cache_info(l2, false, &ppc64_caches.l2);
587 			l3 = of_find_next_cache_node(l2);
588 			of_node_put(l2);
589 		}
590 		if (l3) {
591 			parse_cache_info(l3, false, &ppc64_caches.l3);
592 			of_node_put(l3);
593 		}
594 	}
595 
596 	/* For use by binfmt_elf */
597 	dcache_bsize = ppc64_caches.l1d.block_size;
598 	icache_bsize = ppc64_caches.l1i.block_size;
599 
600 	cur_cpu_spec->dcache_bsize = dcache_bsize;
601 	cur_cpu_spec->icache_bsize = icache_bsize;
602 
603 	DBG(" <- initialize_cache_info()\n");
604 }
605 
606 /*
607  * This returns the limit below which memory accesses to the linear
608  * mapping are guaranteed not to cause an architectural exception (e.g.,
609  * TLB or SLB miss fault).
610  *
611  * This is used to allocate PACAs and various interrupt stacks that
612  * are accessed early in interrupt handlers that must not cause
613  * re-entrant interrupts.
614  */
615 __init u64 ppc64_bolted_size(void)
616 {
617 #ifdef CONFIG_PPC_BOOK3E
618 	/* Freescale BookE bolts the entire linear mapping */
619 	/* XXX: BookE ppc64_rma_limit setup seems to disagree? */
620 	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E))
621 		return linear_map_top;
622 	/* Other BookE, we assume the first GB is bolted */
623 	return 1ul << 30;
624 #else
625 	/* BookS radix, does not take faults on linear mapping */
626 	if (early_radix_enabled())
627 		return ULONG_MAX;
628 
629 	/* BookS hash, the first segment is bolted */
630 	if (early_mmu_has_feature(MMU_FTR_1T_SEGMENT))
631 		return 1UL << SID_SHIFT_1T;
632 	return 1UL << SID_SHIFT;
633 #endif
634 }
635 
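/*
 * Allocate a THREAD_SIZE-aligned stack below @limit, preferring memory on
 * the CPU's own NUMA node and falling back to any node before giving up,
 * since we cannot boot without stacks.
 */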
636 static void *__init alloc_stack(unsigned long limit, int cpu)
637 {
638 	unsigned long pa;
639 
640 	pa = memblock_alloc_base_nid(THREAD_SIZE, THREAD_SIZE, limit,
641 					early_cpu_to_node(cpu), MEMBLOCK_NONE);
642 	if (!pa) {
643 		pa = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
644 		if (!pa)
645 			panic("cannot allocate stacks");
646 	}
647 
648 	return __va(pa);
649 }
650 
651 void __init irqstack_early_init(void)
652 {
653 	u64 limit = ppc64_bolted_size();
654 	unsigned int i;
655 
656 	/*
657 	 * Interrupt stacks must be in the first segment since we
658 	 * cannot afford to take SLB misses on them. They are not
659 	 * accessed in realmode.
660 	 */
661 	for_each_possible_cpu(i) {
662 		softirq_ctx[i] = alloc_stack(limit, i);
663 		hardirq_ctx[i] = alloc_stack(limit, i);
664 	}
665 }
666 
667 #ifdef CONFIG_PPC_BOOK3E
668 void __init exc_lvl_early_init(void)
669 {
670 	unsigned int i;
671 
672 	for_each_possible_cpu(i) {
673 		void *sp;
674 
675 		sp = alloc_stack(ULONG_MAX, i);
676 		critirq_ctx[i] = sp;
677 		paca_ptrs[i]->crit_kstack = sp + THREAD_SIZE;
678 
679 		sp = alloc_stack(ULONG_MAX, i);
680 		dbgirq_ctx[i] = sp;
681 		paca_ptrs[i]->dbg_kstack = sp + THREAD_SIZE;
682 
683 		sp = alloc_stack(ULONG_MAX, i);
684 		mcheckirq_ctx[i] = sp;
685 		paca_ptrs[i]->mc_kstack = sp + THREAD_SIZE;
686 	}
687 
688 	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
689 		patch_exception(0x040, exc_debug_debug_book3e);
690 }
691 #endif
692 
693 /*
694  * Emergency stacks are used for a range of things, from asynchronous
695  * NMIs (system reset, machine check) to synchronous, process-context use.
696  * We set preempt_count to zero, even though that isn't necessarily correct. To
697  * get the right value we'd need to copy it from the previous thread_info, but
698  * doing that might fault, causing more problems.
699  * TODO: what to do with accounting?
700  */
701 static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu)
702 {
703 	ti->task = NULL;
704 	ti->cpu = cpu;
705 	ti->preempt_count = 0;
706 	ti->local_flags = 0;
707 	ti->flags = 0;
708 	klp_init_thread_info(ti);
709 }
710 
711 /*
712  * Stack space used when we detect a bad kernel stack pointer, and
713  * early in SMP boots before relocation is enabled. Exclusive emergency
714  * stack for machine checks.
715  */
716 void __init emergency_stack_init(void)
717 {
718 	u64 limit;
719 	unsigned int i;
720 
721 	/*
722 	 * Emergency stacks must be under 256MB, as we cannot afford to take
723 	 * SLB misses on them. The ABI also requires them to be 128-byte
724 	 * aligned.
725 	 *
726 	 * Since we use these as temporary stacks during secondary CPU
727 	 * bringup, machine check, system reset, and HMI, we need to get
728 	 * at them in real mode. This means they must also be within the RMO
729 	 * region.
730 	 *
731 	 * The IRQ stacks allocated elsewhere in this file are zeroed and
732 	 * initialized in kernel/irq.c. These are initialized here in order
733 	 * to have emergency stacks available as early as possible.
734 	 */
735 	limit = min(ppc64_bolted_size(), ppc64_rma_size);
736 
737 	for_each_possible_cpu(i) {
738 		struct thread_info *ti;
739 
740 		ti = alloc_stack(limit, i);
741 		memset(ti, 0, THREAD_SIZE);
742 		emerg_stack_init_thread_info(ti, i);
743 		paca_ptrs[i]->emergency_sp = (void *)ti + THREAD_SIZE;
744 
745 #ifdef CONFIG_PPC_BOOK3S_64
746 		/* emergency stack for NMI exception handling. */
747 		ti = alloc_stack(limit, i);
748 		memset(ti, 0, THREAD_SIZE);
749 		emerg_stack_init_thread_info(ti, i);
750 		paca_ptrs[i]->nmi_emergency_sp = (void *)ti + THREAD_SIZE;
751 
752 		/* emergency stack for machine check exception handling. */
753 		ti = alloc_stack(limit, i);
754 		memset(ti, 0, THREAD_SIZE);
755 		emerg_stack_init_thread_info(ti, i);
756 		paca_ptrs[i]->mc_emergency_sp = (void *)ti + THREAD_SIZE;
757 #endif
758 	}
759 }
760 
761 #ifdef CONFIG_SMP
763 
764 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
765 {
766 	return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align,
767 				    __pa(MAX_DMA_ADDRESS));
768 }
769 
770 static void __init pcpu_fc_free(void *ptr, size_t size)
771 {
772 	free_bootmem(__pa(ptr), size);
773 }
774 
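/*
 * Grouping hint for pcpu_embed_first_chunk(): CPUs on the same NUMA node
 * are considered local and may share an allocation unit, everything else
 * is remote.
 */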
775 static int pcpu_cpu_distance(unsigned int from, unsigned int to)
776 {
777 	if (early_cpu_to_node(from) == early_cpu_to_node(to))
778 		return LOCAL_DISTANCE;
779 	else
780 		return REMOTE_DISTANCE;
781 }
782 
783 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
784 EXPORT_SYMBOL(__per_cpu_offset);
785 
786 void __init setup_per_cpu_areas(void)
787 {
788 	const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
789 	size_t atom_size;
790 	unsigned long delta;
791 	unsigned int cpu;
792 	int rc;
793 
794 	/*
795 	 * Linear mapping is one of 4K, 1M and 16M.  For 4K, no need
796 	 * to group units.  For larger mappings, use 1M atom which
797 	 * should be large enough to contain a number of units.
798 	 */
799 	if (mmu_linear_psize == MMU_PAGE_4K)
800 		atom_size = PAGE_SIZE;
801 	else
802 		atom_size = 1 << 20;
803 
804 	rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
805 				    pcpu_fc_alloc, pcpu_fc_free);
806 	if (rc < 0)
807 		panic("cannot initialize percpu area (err=%d)", rc);
808 
809 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
810 	for_each_possible_cpu(cpu) {
811 		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
812 		paca_ptrs[cpu]->data_offset = __per_cpu_offset[cpu];
813 	}
814 }
815 #endif
816 
817 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
818 unsigned long memory_block_size_bytes(void)
819 {
820 	if (ppc_md.memory_block_size)
821 		return ppc_md.memory_block_size();
822 
823 	return MIN_MEMORY_BLOCK_SIZE;
824 }
825 #endif
826 
827 #if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
828 struct ppc_pci_io ppc_pci_io;
829 EXPORT_SYMBOL(ppc_pci_io);
830 #endif
831 
832 #ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
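/*
 * The perf-based watchdog counts CPU cycles, so convert the threshold in
 * seconds into a cycle count using the processor frequency.
 */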
833 u64 hw_nmi_get_sample_period(int watchdog_thresh)
834 {
835 	return ppc_proc_freq * watchdog_thresh;
836 }
837 #endif
838 
839 /*
840  * The perf-based hardlockup detector breaks PMU-event-based branches, so
841  * disable it by default. Book3S has a soft-NMI hardlockup detector based
842  * on the decrementer interrupt, so it does not suffer from this problem.
843  *
844  * It is likely to get false positives in VM guests, so disable it there
845  * by default too.
846  */
847 static int __init disable_hardlockup_detector(void)
848 {
849 #ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
850 	hardlockup_detector_disable();
851 #else
852 	if (firmware_has_feature(FW_FEATURE_LPAR))
853 		hardlockup_detector_disable();
854 #endif
855 
856 	return 0;
857 }
858 early_initcall(disable_hardlockup_detector);
859 
860 #ifdef CONFIG_PPC_BOOK3S_64
861 static enum l1d_flush_type enabled_flush_types;
862 static void *l1d_flush_fallback_area;
863 static bool no_rfi_flush;
864 bool rfi_flush;
865 
866 static int __init handle_no_rfi_flush(char *p)
867 {
868 	pr_info("rfi-flush: disabled on command line.\n");
869 	no_rfi_flush = true;
870 	return 0;
871 }
872 early_param("no_rfi_flush", handle_no_rfi_flush);
873 
874 /*
875  * The RFI flush is not KPTI, but because users will see documentation that
876  * says to use nopti, we hijack that option here to also disable the RFI flush.
877  */
878 static int __init handle_no_pti(char *p)
879 {
880 	pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
881 	handle_no_rfi_flush(NULL);
882 	return 0;
883 }
884 early_param("nopti", handle_no_pti);
885 
886 static void do_nothing(void *unused)
887 {
888 	/*
889 	 * We don't need to do the flush explicitly; just entering and exiting
890 	 * the kernel is sufficient, as the RFI exit handlers do the right thing.
891 	 */
892 }
893 
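/*
 * Patch the kernel exit paths for the requested flush type, then run a
 * no-op on every CPU so that each one takes an interrupt and returns
 * through a freshly patched exit path.
 */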
894 void rfi_flush_enable(bool enable)
895 {
896 	if (enable) {
897 		do_rfi_flush_fixups(enabled_flush_types);
898 		on_each_cpu(do_nothing, NULL, 1);
899 	} else
900 		do_rfi_flush_fixups(L1D_FLUSH_NONE);
901 
902 	rfi_flush = enable;
903 }
904 
905 static void __ref init_fallback_flush(void)
906 {
907 	u64 l1d_size, limit;
908 	int cpu;
909 
910 	/* Only allocate the fallback flush area once (at boot time). */
911 	if (l1d_flush_fallback_area)
912 		return;
913 
914 	l1d_size = ppc64_caches.l1d.size;
915 
916 	/*
917 	 * If there is no d-cache-size property in the device tree, l1d_size
918 	 * could be zero. That leads to the loop in the asm wrapping around to
919 	 * 2^64-1, and then walking off the end of the fallback area and
920 	 * eventually causing a page fault which is fatal. Just default to
921 	 * something vaguely sane.
922 	 */
923 	if (!l1d_size)
924 		l1d_size = (64 * 1024);
925 
926 	limit = min(ppc64_bolted_size(), ppc64_rma_size);
927 
928 	/*
929 	 * Align to L1d size, and size it at 2x L1d size, to catch possible
930 	 * hardware prefetch runoff. We don't have a recipe for load patterns to
931 	 * reliably avoid the prefetcher.
932 	 */
933 	l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit));
934 	memset(l1d_flush_fallback_area, 0, l1d_size * 2);
935 
936 	for_each_possible_cpu(cpu) {
937 		struct paca_struct *paca = paca_ptrs[cpu];
938 		paca->rfi_flush_fallback_area = l1d_flush_fallback_area;
939 		paca->l1d_flush_size = l1d_size;
940 	}
941 }
942 
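/*
 * Called by platform setup code once firmware has reported which L1D
 * flush types this system needs; records them and enables the flush
 * unless it was disabled on the command line.
 */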
943 void setup_rfi_flush(enum l1d_flush_type types, bool enable)
944 {
945 	if (types & L1D_FLUSH_FALLBACK) {
946 		pr_info("rfi-flush: fallback displacement flush available\n");
947 		init_fallback_flush();
948 	}
949 
950 	if (types & L1D_FLUSH_ORI)
951 		pr_info("rfi-flush: ori type flush available\n");
952 
953 	if (types & L1D_FLUSH_MTTRIG)
954 		pr_info("rfi-flush: mttrig type flush available\n");
955 
956 	enabled_flush_types = types;
957 
958 	if (!no_rfi_flush)
959 		rfi_flush_enable(enable);
960 }
961 
962 #ifdef CONFIG_DEBUG_FS
963 static int rfi_flush_set(void *data, u64 val)
964 {
965 	bool enable;
966 
967 	if (val == 1)
968 		enable = true;
969 	else if (val == 0)
970 		enable = false;
971 	else
972 		return -EINVAL;
973 
974 	/* Only do anything if we're changing state */
975 	if (enable != rfi_flush)
976 		rfi_flush_enable(enable);
977 
978 	return 0;
979 }
980 
981 static int rfi_flush_get(void *data, u64 *val)
982 {
983 	*val = rfi_flush ? 1 : 0;
984 	return 0;
985 }
986 
987 DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");
988 
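/*
 * Runtime toggle: writing 0 or 1 to /sys/kernel/debug/powerpc/rfi_flush
 * (root only, mode 0600) disables or enables the flush on all CPUs.
 */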
989 static __init int rfi_flush_debugfs_init(void)
990 {
991 	debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
992 	return 0;
993 }
994 device_initcall(rfi_flush_debugfs_init);
995 #endif
996 #endif /* CONFIG_PPC_BOOK3S_64 */
997