xref: /linux/arch/powerpc/kernel/dt_cpu_ftrs.c (revision b6f534d1a642a9b6263fd52df30806171fbc331e)
1 /*
2  * Copyright 2017, Nicholas Piggin, IBM Corporation
3  * Licensed under GPLv2.
4  */
5 
6 #define pr_fmt(fmt) "dt-cpu-ftrs: " fmt
7 
8 #include <linux/export.h>
9 #include <linux/init.h>
10 #include <linux/jump_label.h>
11 #include <linux/libfdt.h>
12 #include <linux/memblock.h>
13 #include <linux/printk.h>
14 #include <linux/sched.h>
15 #include <linux/string.h>
16 #include <linux/threads.h>
17 
18 #include <asm/cputable.h>
19 #include <asm/dt_cpu_ftrs.h>
20 #include <asm/mmu.h>
21 #include <asm/oprofile_impl.h>
22 #include <asm/prom.h>
23 #include <asm/setup.h>
24 
25 
/* Device-tree visible constants follow */

/* Values of a feature node's "isa" property (ISA version * 1000) */
#define ISA_V2_07B      2070
#define ISA_V3_0B       3000

/* Bits of the "usable-privilege" property */
#define USABLE_PR               (1U << 0)
#define USABLE_OS               (1U << 1)
#define USABLE_HV               (1U << 2)

/* Known enable "recipes" in the "hv-support" / "os-support" properties */
#define HV_SUPPORT_HFSCR        (1U << 0)
#define OS_SUPPORT_FSCR         (1U << 0)

/* For parsing, we define all bits set as "NONE" case */
#define HV_SUPPORT_NONE		0xffffffffU
#define OS_SUPPORT_NONE		0xffffffffU
40 
/* One feature node parsed out of the ibm,powerpc-cpu-features DT node */
struct dt_cpu_feature {
	const char *name;		/* subnode name == feature name */
	uint32_t isa;			/* "isa" property value */
	uint32_t usable_privilege;	/* USABLE_{PR,OS,HV} mask */
	uint32_t hv_support;		/* HV_SUPPORT_* or HV_SUPPORT_NONE */
	uint32_t os_support;		/* OS_SUPPORT_* or OS_SUPPORT_NONE */
	uint32_t hfscr_bit_nr;		/* HFSCR bit to set, or -1 */
	uint32_t fscr_bit_nr;		/* FSCR bit to set, or -1 */
	uint32_t hwcap_bit_nr;		/* user hwcap/hwcap2 bit, or -1 */
	/* fdt parsing */
	unsigned long node;		/* flat DT node offset */
	int enabled;			/* set once successfully enabled */
	int disabled;			/* set if enabling failed/was refused */
};
55 
/* Kernel CPU feature bits assumed for any cpufeatures-capable CPU */
#define CPU_FTRS_BASE \
	   (CPU_FTR_LWSYNC | \
	    CPU_FTR_FPU_UNAVAILABLE |\
	    CPU_FTR_NODSISRALIGN |\
	    CPU_FTR_NOEXECUTE |\
	    CPU_FTR_COHERENT_ICACHE | \
	    CPU_FTR_STCX_CHECKS_ADDRESS |\
	    CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
	    CPU_FTR_DAWR | \
	    CPU_FTR_ARCH_206 |\
	    CPU_FTR_ARCH_207S)

/* MMU features ORed in by the mmu-hash* enable callbacks */
#define MMU_FTRS_HASH_BASE (MMU_FTRS_POWER8)

/* Baseline user-visible (AT_HWCAP/AT_HWCAP2) feature bits */
#define COMMON_USER_BASE	(PPC_FEATURE_32 | PPC_FEATURE_64 | \
				 PPC_FEATURE_ARCH_2_06 |\
				 PPC_FEATURE_ICACHE_SNOOP)
#define COMMON_USER2_BASE	(PPC_FEATURE2_ARCH_2_07 | \
				 PPC_FEATURE2_ISEL)
75 /*
76  * Set up the base CPU
77  */
78 
extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
extern long __machine_check_early_realmode_p9(struct pt_regs *regs);

/* Nonzero when the boot CPU had MSR[HV] set (see cpufeatures_setup_cpu) */
static int hv_mode;

/*
 * SPR values captured at the end of feature setup
 * (cpufeatures_setup_finished) and reapplied by the ->cpu_restore hook.
 */
static struct {
	u64	lpcr;
	u64	hfscr;
	u64	fscr;
} system_registers;

/* PMU register init routine chosen by the performance-monitor-* features */
static void (*init_pmu_registers)(void);
91 
/*
 * ->cpu_restore hook: reapply the SPR state captured in system_registers
 * on secondary bringup and idle state wakeup.
 */
static void __restore_cpu_cpufeatures(void)
{
	/*
	 * LPCR is restored by the power on engine already. It can be changed
	 * after early init e.g., by radix enable, and we have no unified API
	 * for saving and restoring such SPRs.
	 *
	 * This ->restore hook should really be removed from idle and register
	 * restore moved directly into the idle restore code, because this code
	 * doesn't know how idle is implemented or what it needs restored here.
	 *
	 * The best we can do to accommodate secondary boot and idle restore
	 * for now is "or" LPCR with existing.
	 */

	mtspr(SPRN_LPCR, system_registers.lpcr | mfspr(SPRN_LPCR));
	if (hv_mode) {
		/* HV-only SPRs; LPID must be 0 for the host */
		mtspr(SPRN_LPID, 0);
		mtspr(SPRN_HFSCR, system_registers.hfscr);
	}
	mtspr(SPRN_FSCR, system_registers.fscr);

	if (init_pmu_registers)
		init_pmu_registers();
}
117 
/* Holds the DT "display-name" string; cpu_name points here when present */
static char dt_cpu_name[64];

/*
 * Minimal baseline cpu_spec installed before feature nodes are parsed;
 * feature enable callbacks OR additional capabilities into it.
 */
static struct cpu_spec __initdata base_cpu_spec = {
	.cpu_name		= NULL,
	.cpu_features		= CPU_FTRS_BASE,
	.cpu_user_features	= COMMON_USER_BASE,
	.cpu_user_features2	= COMMON_USER2_BASE,
	.mmu_features		= 0,
	.icache_bsize		= 32, /* minimum block size, fixed by */
	.dcache_bsize		= 32, /* cache info init.             */
	.num_pmcs		= 0,
	.pmc_type		= PPC_PMC_DEFAULT,
	.oprofile_cpu_type	= NULL,
	.oprofile_type		= PPC_OPROFILE_INVALID,
	.cpu_setup		= NULL,
	.cpu_restore		= __restore_cpu_cpufeatures,
	.machine_check_early	= NULL,
	.platform		= NULL,
};
137 
/* Install the baseline cpu_spec and put SPRs into a known starting state */
static void __init cpufeatures_setup_cpu(void)
{
	set_cur_cpu_spec(&base_cpu_spec);

	/* Match any PVR; record the real one */
	cur_cpu_spec->pvr_mask = -1;
	cur_cpu_spec->pvr_value = mfspr(SPRN_PVR);

	/* Initialize the base environment -- clear FSCR/HFSCR.  */
	hv_mode = !!(mfmsr() & MSR_HV);
	if (hv_mode) {
		/* CPU_FTR_HVMODE is used early in PACA setup */
		cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
		mtspr(SPRN_HFSCR, 0);
	}
	mtspr(SPRN_FSCR, 0);

	/*
	 * LPCR does not get cleared, to match behaviour with secondaries
	 * in __restore_cpu_cpufeatures. Once the idle code is fixed, this
	 * could clear LPCR too.
	 */
}
160 
161 static int __init feat_try_enable_unknown(struct dt_cpu_feature *f)
162 {
163 	if (f->hv_support == HV_SUPPORT_NONE) {
164 	} else if (f->hv_support & HV_SUPPORT_HFSCR) {
165 		u64 hfscr = mfspr(SPRN_HFSCR);
166 		hfscr |= 1UL << f->hfscr_bit_nr;
167 		mtspr(SPRN_HFSCR, hfscr);
168 	} else {
169 		/* Does not have a known recipe */
170 		return 0;
171 	}
172 
173 	if (f->os_support == OS_SUPPORT_NONE) {
174 	} else if (f->os_support & OS_SUPPORT_FSCR) {
175 		u64 fscr = mfspr(SPRN_FSCR);
176 		fscr |= 1UL << f->fscr_bit_nr;
177 		mtspr(SPRN_FSCR, fscr);
178 	} else {
179 		/* Does not have a known recipe */
180 		return 0;
181 	}
182 
183 	if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
184 		uint32_t word = f->hwcap_bit_nr / 32;
185 		uint32_t bit = f->hwcap_bit_nr % 32;
186 
187 		if (word == 0)
188 			cur_cpu_spec->cpu_user_features |= 1U << bit;
189 		else if (word == 1)
190 			cur_cpu_spec->cpu_user_features2 |= 1U << bit;
191 		else
192 			pr_err("%s could not advertise to user (no hwcap bits)\n", f->name);
193 	}
194 
195 	return 1;
196 }
197 
198 static int __init feat_enable(struct dt_cpu_feature *f)
199 {
200 	if (f->hv_support != HV_SUPPORT_NONE) {
201 		if (f->hfscr_bit_nr != -1) {
202 			u64 hfscr = mfspr(SPRN_HFSCR);
203 			hfscr |= 1UL << f->hfscr_bit_nr;
204 			mtspr(SPRN_HFSCR, hfscr);
205 		}
206 	}
207 
208 	if (f->os_support != OS_SUPPORT_NONE) {
209 		if (f->fscr_bit_nr != -1) {
210 			u64 fscr = mfspr(SPRN_FSCR);
211 			fscr |= 1UL << f->fscr_bit_nr;
212 			mtspr(SPRN_FSCR, fscr);
213 		}
214 	}
215 
216 	if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
217 		uint32_t word = f->hwcap_bit_nr / 32;
218 		uint32_t bit = f->hwcap_bit_nr % 32;
219 
220 		if (word == 0)
221 			cur_cpu_spec->cpu_user_features |= 1U << bit;
222 		else if (word == 1)
223 			cur_cpu_spec->cpu_user_features2 |= 1U << bit;
224 		else
225 			pr_err("CPU feature: %s could not advertise to user (no hwcap bits)\n", f->name);
226 	}
227 
228 	return 1;
229 }
230 
/* Table hook that forcibly keeps a feature off (always "fails") */
static int __init feat_disable(struct dt_cpu_feature *f)
{
	return 0;
}
235 
236 static int __init feat_enable_hv(struct dt_cpu_feature *f)
237 {
238 	u64 lpcr;
239 
240 	if (!hv_mode) {
241 		pr_err("CPU feature hypervisor present in device tree but HV mode not enabled in the CPU. Ignoring.\n");
242 		return 0;
243 	}
244 
245 	mtspr(SPRN_LPID, 0);
246 
247 	lpcr = mfspr(SPRN_LPCR);
248 	lpcr &=  ~LPCR_LPES0; /* HV external interrupts */
249 	mtspr(SPRN_LPCR, lpcr);
250 
251 	cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
252 
253 	return 1;
254 }
255 
/* "little-endian": advertise true little-endian support to userspace */
static int __init feat_enable_le(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_TRUE_LE;
	return 1;
}
261 
262 static int __init feat_enable_smt(struct dt_cpu_feature *f)
263 {
264 	cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
265 	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_SMT;
266 	return 1;
267 }
268 
269 static int __init feat_enable_idle_nap(struct dt_cpu_feature *f)
270 {
271 	u64 lpcr;
272 
273 	/* Set PECE wakeup modes for ISA 207 */
274 	lpcr = mfspr(SPRN_LPCR);
275 	lpcr |=  LPCR_PECE0;
276 	lpcr |=  LPCR_PECE1;
277 	lpcr |=  LPCR_PECE2;
278 	mtspr(SPRN_LPCR, lpcr);
279 
280 	return 1;
281 }
282 
/* CPU sets DSISR on alignment interrupts: drop the "no DSISR" workaround */
static int __init feat_enable_align_dsisr(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_features &= ~CPU_FTR_NODSISRALIGN;

	return 1;
}
289 
290 static int __init feat_enable_idle_stop(struct dt_cpu_feature *f)
291 {
292 	u64 lpcr;
293 
294 	/* Set PECE wakeup modes for ISAv3.0B */
295 	lpcr = mfspr(SPRN_LPCR);
296 	lpcr |=  LPCR_PECE0;
297 	lpcr |=  LPCR_PECE1;
298 	lpcr |=  LPCR_PECE2;
299 	mtspr(SPRN_LPCR, lpcr);
300 
301 	return 1;
302 }
303 
/* "mmu-hash": select hash translation and set VRMASD for ISA 2.07 */
static int __init feat_enable_mmu_hash(struct dt_cpu_feature *f)
{
	u64 lpcr;

	lpcr = mfspr(SPRN_LPCR);
	lpcr &= ~LPCR_ISL;

	/* VRMASD */
	lpcr |= LPCR_VPM0;
	lpcr &= ~LPCR_VPM1;
	lpcr |= 0x10UL << LPCR_VRMASD_SH; /* L=1 LP=00 */
	mtspr(SPRN_LPCR, lpcr);

	cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

	return 1;
}
322 
323 static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f)
324 {
325 	u64 lpcr;
326 
327 	lpcr = mfspr(SPRN_LPCR);
328 	lpcr &= ~LPCR_ISL;
329 	mtspr(SPRN_LPCR, lpcr);
330 
331 	cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
332 	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;
333 
334 	return 1;
335 }
336 
337 
/* "mmu-radix": only enabled when the kernel is built with radix support */
static int __init feat_enable_mmu_radix(struct dt_cpu_feature *f)
{
#ifdef CONFIG_PPC_RADIX_MMU
	cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
	/* NOTE(review): hash base features are also set here — presumably
	 * so hash translation remains possible; confirm against mm code. */
	cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

	return 1;
#endif
	return 0;
}
349 
350 static int __init feat_enable_dscr(struct dt_cpu_feature *f)
351 {
352 	u64 lpcr;
353 
354 	feat_enable(f);
355 
356 	lpcr = mfspr(SPRN_LPCR);
357 	lpcr &= ~LPCR_DPFD;
358 	lpcr |=  (4UL << LPCR_DPFD_SH);
359 	mtspr(SPRN_LPCR, lpcr);
360 
361 	return 1;
362 }
363 
/* Grant access to the PMU facility via HFSCR bit 60
 * (raw PPC_BIT(60) — presumably the PM bit; TODO confirm vs reg.h). */
static void hfscr_pmu_enable(void)
{
	u64 hfscr = mfspr(SPRN_HFSCR);
	hfscr |= PPC_BIT(60);
	mtspr(SPRN_HFSCR, hfscr);
}
370 
/* Zero the POWER8 PMU control registers (HV-only ones when applicable);
 * also installed as init_pmu_registers for the ->cpu_restore path. */
static void init_pmu_power8(void)
{
	if (hv_mode) {
		mtspr(SPRN_MMCRC, 0);
		mtspr(SPRN_MMCRH, 0);
	}

	mtspr(SPRN_MMCRA, 0);
	mtspr(SPRN_MMCR0, 0);
	mtspr(SPRN_MMCR1, 0);
	mtspr(SPRN_MMCR2, 0);
	mtspr(SPRN_MMCRS, 0);
}
384 
385 static int __init feat_enable_mce_power8(struct dt_cpu_feature *f)
386 {
387 	cur_cpu_spec->platform = "power8";
388 	cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p8;
389 
390 	return 1;
391 }
392 
/* "performance-monitor-power8": enable and initialize the POWER8 PMU */
static int __init feat_enable_pmu_power8(struct dt_cpu_feature *f)
{
	hfscr_pmu_enable();

	init_pmu_power8();
	init_pmu_registers = init_pmu_power8;

	cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;
	/* PVR-derived quirk: POWER8E needs the PMAO bug workaround */
	if (pvr_version_is(PVR_POWER8E))
		cur_cpu_spec->cpu_features |= CPU_FTR_PMAO_BUG;

	cur_cpu_spec->num_pmcs		= 6;
	cur_cpu_spec->pmc_type		= PPC_PMC_IBM;
	cur_cpu_spec->oprofile_cpu_type	= "ppc64/power8";

	return 1;
}
411 
/* Zero the POWER9 PMU control registers (no MMCRH/MMCRS on P9);
 * also installed as init_pmu_registers for the ->cpu_restore path. */
static void init_pmu_power9(void)
{
	if (hv_mode)
		mtspr(SPRN_MMCRC, 0);

	mtspr(SPRN_MMCRA, 0);
	mtspr(SPRN_MMCR0, 0);
	mtspr(SPRN_MMCR1, 0);
	mtspr(SPRN_MMCR2, 0);
}
422 
423 static int __init feat_enable_mce_power9(struct dt_cpu_feature *f)
424 {
425 	cur_cpu_spec->platform = "power9";
426 	cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p9;
427 
428 	return 1;
429 }
430 
/* "performance-monitor-power9": enable and initialize the POWER9 PMU */
static int __init feat_enable_pmu_power9(struct dt_cpu_feature *f)
{
	hfscr_pmu_enable();

	init_pmu_power9();
	init_pmu_registers = init_pmu_power9;

	cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;

	cur_cpu_spec->num_pmcs		= 6;
	cur_cpu_spec->pmc_type		= PPC_PMC_IBM;
	cur_cpu_spec->oprofile_cpu_type	= "ppc64/power9";

	return 1;
}
447 
/* "transactional-memory": only enabled when the kernel supports TM */
static int __init feat_enable_tm(struct dt_cpu_feature *f)
{
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	feat_enable(f);
	/* Advertise the no-syscalls-in-transactions HTM variant */
	cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_HTM_NOSC;
	return 1;
#endif
	return 0;
}
457 
/* "floating-point": generic enable, then clear the FPU-unavailable flag
 * that CPU_FTRS_BASE sets by default */
static int __init feat_enable_fp(struct dt_cpu_feature *f)
{
	feat_enable(f);
	cur_cpu_spec->cpu_features &= ~CPU_FTR_FPU_UNAVAILABLE;

	return 1;
}
465 
/* "vector": only enabled when the kernel is built with Altivec support */
static int __init feat_enable_vector(struct dt_cpu_feature *f)
{
#ifdef CONFIG_ALTIVEC
	feat_enable(f);
	cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
	cur_cpu_spec->cpu_features |= CPU_FTR_VMX_COPY;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;

	return 1;
#endif
	return 0;
}
478 
/* "vector-scalar": only enabled when the kernel is built with VSX support */
static int __init feat_enable_vsx(struct dt_cpu_feature *f)
{
#ifdef CONFIG_VSX
	feat_enable(f);
	cur_cpu_spec->cpu_features |= CPU_FTR_VSX;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_VSX;

	return 1;
#endif
	return 0;
}
490 
/* "processor-utilization-of-resources-register": PURR/SPURR available */
static int __init feat_enable_purr(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_features |= CPU_FTR_PURR | CPU_FTR_SPURR;

	return 1;
}
497 
/* "event-based-branch": enable the facility but defer the user hwcap bit */
static int __init feat_enable_ebb(struct dt_cpu_feature *f)
{
	/*
	 * PPC_FEATURE2_EBB is enabled in PMU init code because it has
	 * historically been related to the PMU facility. This may have
	 * to be decoupled if EBB becomes more generic. For now, follow
	 * existing convention.
	 */
	f->hwcap_bit_nr = -1;	/* suppress hwcap advertising in feat_enable */
	feat_enable(f);

	return 1;
}
511 
512 static int __init feat_enable_dbell(struct dt_cpu_feature *f)
513 {
514 	u64 lpcr;
515 
516 	/* P9 has an HFSCR for privileged state */
517 	feat_enable(f);
518 
519 	cur_cpu_spec->cpu_features |= CPU_FTR_DBELL;
520 
521 	lpcr = mfspr(SPRN_LPCR);
522 	lpcr |=  LPCR_PECEDH; /* hyp doorbell wakeup */
523 	mtspr(SPRN_LPCR, lpcr);
524 
525 	return 1;
526 }
527 
/* "hypervisor-virtualization-interrupt": route interrupts as HVI */
static int __init feat_enable_hvi(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/*
	 * POWER9 XIVE interrupts including in OPAL XICS compatibility
	 * are always delivered as hypervisor virtualization interrupts (HVI)
	 * rather than EE.
	 *
	 * However LPES0 is not set here, in the chance that an EE does get
	 * delivered to the host somehow, the EE handler would not expect it
	 * to be delivered in LPES0 mode (e.g., using SRR[01]). This could
	 * happen if there is a bug in interrupt controller code, or IC is
	 * misconfigured in systemsim.
	 */

	lpcr = mfspr(SPRN_LPCR);
	lpcr |= LPCR_HVICE;	/* enable hvi interrupts */
	lpcr |= LPCR_HEIC;	/* disable ee interrupts when MSR_HV */
	lpcr |= LPCR_PECE_HVEE; /* hvi can wake from stop */
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}
552 
/* "cache-inhibited-large-page": MMU supports large CI pages */
static int __init feat_enable_large_ci(struct dt_cpu_feature *f)
{
	cur_cpu_spec->mmu_features |= MMU_FTR_CI_LARGE_PAGE;

	return 1;
}
559 
/* Maps a device tree feature name to its enable handler */
struct dt_cpu_feature_match {
	const char *name;
	int (*enable)(struct dt_cpu_feature *f);
	u64 cpu_ftr_bit_mask;	/* CPU_FTR_* bits associated with the feature */
};
565 
/*
 * Feature names the kernel knows how to enable. Names not listed here
 * fall back to feat_try_enable_unknown() (unless "dt_cpu_ftrs=known").
 */
static struct dt_cpu_feature_match __initdata
		dt_cpu_feature_match_table[] = {
	{"hypervisor", feat_enable_hv, 0},
	{"big-endian", feat_enable, 0},
	{"little-endian", feat_enable_le, CPU_FTR_REAL_LE},
	{"smt", feat_enable_smt, 0},
	{"interrupt-facilities", feat_enable, 0},
	{"timer-facilities", feat_enable, 0},
	{"timer-facilities-v3", feat_enable, 0},
	{"debug-facilities", feat_enable, 0},
	{"come-from-address-register", feat_enable, CPU_FTR_CFAR},
	{"branch-tracing", feat_enable, 0},
	{"floating-point", feat_enable_fp, 0},
	{"vector", feat_enable_vector, 0},
	{"vector-scalar", feat_enable_vsx, 0},
	{"vector-scalar-v3", feat_enable, 0},
	{"decimal-floating-point", feat_enable, 0},
	{"decimal-integer", feat_enable, 0},
	{"quadword-load-store", feat_enable, 0},
	{"vector-crypto", feat_enable, 0},
	{"mmu-hash", feat_enable_mmu_hash, 0},
	{"mmu-radix", feat_enable_mmu_radix, 0},
	{"mmu-hash-v3", feat_enable_mmu_hash_v3, 0},
	{"virtual-page-class-key-protection", feat_enable, 0},
	{"transactional-memory", feat_enable_tm, CPU_FTR_TM},
	{"transactional-memory-v3", feat_enable_tm, 0},
	{"tm-suspend-hypervisor-assist", feat_enable, CPU_FTR_P9_TM_HV_ASSIST},
	{"tm-suspend-xer-so-bug", feat_enable, CPU_FTR_P9_TM_XER_SO_BUG},
	{"idle-nap", feat_enable_idle_nap, 0},
	{"alignment-interrupt-dsisr", feat_enable_align_dsisr, 0},
	{"idle-stop", feat_enable_idle_stop, 0},
	{"machine-check-power8", feat_enable_mce_power8, 0},
	{"performance-monitor-power8", feat_enable_pmu_power8, 0},
	{"data-stream-control-register", feat_enable_dscr, CPU_FTR_DSCR},
	{"event-based-branch", feat_enable_ebb, 0},
	{"target-address-register", feat_enable, 0},
	{"branch-history-rolling-buffer", feat_enable, 0},
	{"control-register", feat_enable, CPU_FTR_CTRL},
	{"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL},
	{"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
	{"processor-utilization-of-resources-register", feat_enable_purr, 0},
	{"no-execute", feat_enable, 0},
	{"strong-access-ordering", feat_enable, CPU_FTR_SAO},
	{"cache-inhibited-large-page", feat_enable_large_ci, 0},
	{"coprocessor-icswx", feat_enable, 0},
	{"hypervisor-virtualization-interrupt", feat_enable_hvi, 0},
	{"program-priority-register", feat_enable, CPU_FTR_HAS_PPR},
	{"wait", feat_enable, 0},
	{"atomic-memory-operations", feat_enable, 0},
	{"branch-v3", feat_enable, 0},
	{"copy-paste", feat_enable, 0},
	{"decimal-floating-point-v3", feat_enable, 0},
	{"decimal-integer-v3", feat_enable, 0},
	{"fixed-point-v3", feat_enable, 0},
	{"floating-point-v3", feat_enable, 0},
	{"group-start-register", feat_enable, 0},
	{"pc-relative-addressing", feat_enable, 0},
	{"machine-check-power9", feat_enable_mce_power9, 0},
	{"performance-monitor-power9", feat_enable_pmu_power9, 0},
	{"event-based-branch-v3", feat_enable, 0},
	{"random-number-generator", feat_enable, 0},
	{"system-call-vectored", feat_disable, 0},
	{"trace-interrupt-v3", feat_enable, 0},
	{"vector-v3", feat_enable, 0},
	{"vector-binary128", feat_enable, 0},
	{"vector-binary16", feat_enable, 0},
	{"wait-v3", feat_enable, 0},
};
634 
/* True once dt_cpu_ftrs_init() selects the DT cpufeatures path */
static bool __initdata using_dt_cpu_ftrs;
/* "dt_cpu_ftrs=known" clears this to skip feat_try_enable_unknown() */
static bool __initdata enable_unknown = true;
637 
/*
 * Parse the "dt_cpu_ftrs=" kernel command line parameter:
 * "off" disables the DT cpufeatures path, "known" enables only features
 * with kernel handlers. Returns 0 when consumed, 1 for unknown values
 * (early_param convention: non-zero reports a bad argument).
 */
static int __init dt_cpu_ftrs_parse(char *str)
{
	if (!str)
		return 0;

	if (!strcmp(str, "off"))
		using_dt_cpu_ftrs = false;
	else if (!strcmp(str, "known"))
		enable_unknown = false;
	else
		return 1;

	return 0;
}
early_param("dt_cpu_ftrs", dt_cpu_ftrs_parse);
653 
654 static void __init cpufeatures_setup_start(u32 isa)
655 {
656 	pr_info("setup for ISA %d\n", isa);
657 
658 	if (isa >= 3000) {
659 		cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_300;
660 		cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_00;
661 	}
662 }
663 
664 static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
665 {
666 	const struct dt_cpu_feature_match *m;
667 	bool known = false;
668 	int i;
669 
670 	for (i = 0; i < ARRAY_SIZE(dt_cpu_feature_match_table); i++) {
671 		m = &dt_cpu_feature_match_table[i];
672 		if (!strcmp(f->name, m->name)) {
673 			known = true;
674 			if (m->enable(f))
675 				break;
676 
677 			pr_info("not enabling: %s (disabled or unsupported by kernel)\n",
678 				f->name);
679 			return false;
680 		}
681 	}
682 
683 	if (!known && enable_unknown) {
684 		if (!feat_try_enable_unknown(f)) {
685 			pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
686 				f->name);
687 			return false;
688 		}
689 	}
690 
691 	if (m->cpu_ftr_bit_mask)
692 		cur_cpu_spec->cpu_features |= m->cpu_ftr_bit_mask;
693 
694 	if (known)
695 		pr_debug("enabling: %s\n", f->name);
696 	else
697 		pr_debug("enabling: %s (unknown)\n", f->name);
698 
699 	return true;
700 }
701 
/* Apply PVR-based workarounds that the cpufeatures DT cannot express */
static __init void cpufeatures_cpu_quirks(void)
{
	int version = mfspr(SPRN_PVR);

	/*
	 * Not all quirks can be derived from the cpufeatures device tree.
	 */
	if ((version & 0xffffff00) == 0x004e0100)	/* POWER9 DD1.x */
		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1;
	else if ((version & 0xffffefff) == 0x004e0200)
		; /* DD2.0 has no feature flag */
	else if ((version & 0xffffefff) == 0x004e0201)
		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
	else if ((version & 0xffffefff) == 0x004e0202) {
		/* DD2.2: TM quirks plus the DD2.1 flag */
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST;
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG;
		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
	} else /* DD2.1 and up have DD2_1 */
		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;

	/* All POWER9 (0x004e): no DAWR, TLBIE erratum workaround */
	if ((version & 0xffff0000) == 0x004e0000) {
		cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_BUG;
	}
}
727 
/* Final pass: apply quirks, fix up HV mode, and snapshot SPR state for
 * the ->cpu_restore hook. */
static void __init cpufeatures_setup_finished(void)
{
	cpufeatures_cpu_quirks();

	/* DT lacked a "hypervisor" node but we booted in HV mode anyway */
	if (hv_mode && !(cur_cpu_spec->cpu_features & CPU_FTR_HVMODE)) {
		pr_err("hypervisor not present in device tree but HV mode is enabled in the CPU. Enabling.\n");
		cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
	}

	/* Make sure powerpc_base_platform is non-NULL */
	powerpc_base_platform = cur_cpu_spec->platform;

	/* Captured for __restore_cpu_cpufeatures on secondary/idle restore */
	system_registers.lpcr = mfspr(SPRN_LPCR);
	system_registers.hfscr = mfspr(SPRN_HFSCR);
	system_registers.fscr = mfspr(SPRN_FSCR);

	pr_info("final cpu/mmu features = 0x%016lx 0x%08x\n",
		cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
}
747 
748 static int __init disabled_on_cmdline(void)
749 {
750 	unsigned long root, chosen;
751 	const char *p;
752 
753 	root = of_get_flat_dt_root();
754 	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
755 	if (chosen == -FDT_ERR_NOTFOUND)
756 		return false;
757 
758 	p = of_get_flat_dt_prop(chosen, "bootargs", NULL);
759 	if (!p)
760 		return false;
761 
762 	if (strstr(p, "dt_cpu_ftrs=off"))
763 		return true;
764 
765 	return false;
766 }
767 
768 static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
769 					int depth, void *data)
770 {
771 	if (of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features")
772 	    && of_get_flat_dt_prop(node, "isa", NULL))
773 		return 1;
774 
775 	return 0;
776 }
777 
/* Whether dt_cpu_ftrs_init() selected the DT cpufeatures path */
bool __init dt_cpu_ftrs_in_use(void)
{
	return using_dt_cpu_ftrs;
}
782 
/*
 * Early entry point: decide whether to use the ibm,powerpc-cpu-features
 * binding for CPU setup. Returns true (and installs the baseline
 * cpu_spec) only when the FDT is valid, a cpufeatures node exists, and
 * the command line has not disabled it.
 */
bool __init dt_cpu_ftrs_init(void *fdt)
{
	using_dt_cpu_ftrs = false;

	/* Setup and verify the FDT, if it fails we just bail */
	if (!early_init_dt_verify(fdt))
		return false;

	if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
		return false;

	/* Checked here because early_param parsing happens later */
	if (disabled_on_cmdline())
		return false;

	cpufeatures_setup_cpu();

	using_dt_cpu_ftrs = true;
	return true;
}
802 
/* Scratch array of parsed feature nodes; memblock-allocated in
 * dt_cpu_ftrs_scan_callback() and freed there when processing is done */
static int nr_dt_cpu_features;
static struct dt_cpu_feature *dt_cpu_features;
805 
/*
 * Parse one feature subnode into dt_cpu_features[i], validate the
 * privilege/recipe properties for consistency, and enable the feature
 * immediately if it has no "dependencies" property. Always returns 0
 * (malformed nodes are warned about and left disabled).
 */
static int __init process_cpufeatures_node(unsigned long node,
					  const char *uname, int i)
{
	const __be32 *prop;
	struct dt_cpu_feature *f;
	int len;

	f = &dt_cpu_features[i];
	memset(f, 0, sizeof(struct dt_cpu_feature));

	f->node = node;

	f->name = uname;

	/* "isa" and "usable-privilege" are mandatory properties */
	prop = of_get_flat_dt_prop(node, "isa", &len);
	if (!prop) {
		pr_warn("%s: missing isa property\n", uname);
		return 0;
	}
	f->isa = be32_to_cpup(prop);

	prop = of_get_flat_dt_prop(node, "usable-privilege", &len);
	if (!prop) {
		pr_warn("%s: missing usable-privilege property", uname);
		return 0;
	}
	f->usable_privilege = be32_to_cpup(prop);

	/* Remaining properties are optional; default to NONE / -1 */
	prop = of_get_flat_dt_prop(node, "hv-support", &len);
	if (prop)
		f->hv_support = be32_to_cpup(prop);
	else
		f->hv_support = HV_SUPPORT_NONE;

	prop = of_get_flat_dt_prop(node, "os-support", &len);
	if (prop)
		f->os_support = be32_to_cpup(prop);
	else
		f->os_support = OS_SUPPORT_NONE;

	prop = of_get_flat_dt_prop(node, "hfscr-bit-nr", &len);
	if (prop)
		f->hfscr_bit_nr = be32_to_cpup(prop);
	else
		f->hfscr_bit_nr = -1;
	prop = of_get_flat_dt_prop(node, "fscr-bit-nr", &len);
	if (prop)
		f->fscr_bit_nr = be32_to_cpup(prop);
	else
		f->fscr_bit_nr = -1;
	prop = of_get_flat_dt_prop(node, "hwcap-bit-nr", &len);
	if (prop)
		f->hwcap_bit_nr = be32_to_cpup(prop);
	else
		f->hwcap_bit_nr = -1;

	/*
	 * Cross-check the advertised privilege levels against the
	 * hv/os/hwcap recipe properties; reject inconsistent nodes.
	 */
	if (f->usable_privilege & USABLE_HV) {
		if (!(mfmsr() & MSR_HV)) {
			pr_warn("%s: HV feature passed to guest\n", uname);
			return 0;
		}

		if (f->hv_support == HV_SUPPORT_NONE && f->hfscr_bit_nr != -1) {
			pr_warn("%s: unwanted hfscr_bit_nr\n", uname);
			return 0;
		}

		if (f->hv_support == HV_SUPPORT_HFSCR) {
			if (f->hfscr_bit_nr == -1) {
				pr_warn("%s: missing hfscr_bit_nr\n", uname);
				return 0;
			}
		}
	} else {
		if (f->hv_support != HV_SUPPORT_NONE || f->hfscr_bit_nr != -1) {
			pr_warn("%s: unwanted hv_support/hfscr_bit_nr\n", uname);
			return 0;
		}
	}

	if (f->usable_privilege & USABLE_OS) {
		if (f->os_support == OS_SUPPORT_NONE && f->fscr_bit_nr != -1) {
			pr_warn("%s: unwanted fscr_bit_nr\n", uname);
			return 0;
		}

		if (f->os_support == OS_SUPPORT_FSCR) {
			if (f->fscr_bit_nr == -1) {
				pr_warn("%s: missing fscr_bit_nr\n", uname);
				return 0;
			}
		}
	} else {
		if (f->os_support != OS_SUPPORT_NONE || f->fscr_bit_nr != -1) {
			pr_warn("%s: unwanted os_support/fscr_bit_nr\n", uname);
			return 0;
		}
	}

	if (!(f->usable_privilege & USABLE_PR)) {
		if (f->hwcap_bit_nr != -1) {
			pr_warn("%s: unwanted hwcap_bit_nr\n", uname);
			return 0;
		}
	}

	/* Do all the independent features in the first pass */
	if (!of_get_flat_dt_prop(node, "dependencies", &len)) {
		if (cpufeatures_process_feature(f))
			f->enabled = 1;
		else
			f->disabled = 1;
	}

	return 0;
}
922 
/*
 * Second pass: recursively enable a feature after all of its
 * "dependencies" (a list of phandles to other feature nodes) have been
 * resolved. A feature with a disabled dependency is itself disabled.
 * No cycle detection — assumes the device tree dependency graph is
 * acyclic.
 */
static void __init cpufeatures_deps_enable(struct dt_cpu_feature *f)
{
	const __be32 *prop;
	int len;
	int nr_deps;
	int i;

	/* Already settled in an earlier pass or via recursion */
	if (f->enabled || f->disabled)
		return;

	prop = of_get_flat_dt_prop(f->node, "dependencies", &len);
	if (!prop) {
		pr_warn("%s: missing dependencies property", f->name);
		return;
	}

	/* Property is an array of 32-bit phandles */
	nr_deps = len / sizeof(int);

	for (i = 0; i < nr_deps; i++) {
		unsigned long phandle = be32_to_cpu(prop[i]);
		int j;

		/* Find the feature node this phandle refers to */
		for (j = 0; j < nr_dt_cpu_features; j++) {
			struct dt_cpu_feature *d = &dt_cpu_features[j];

			if (of_get_flat_dt_phandle(d->node) == phandle) {
				cpufeatures_deps_enable(d);
				if (d->disabled) {
					f->disabled = 1;
					return;
				}
			}
		}
	}

	if (cpufeatures_process_feature(f))
		f->enabled = 1;
	else
		f->disabled = 1;
}
963 
964 static int __init scan_cpufeatures_subnodes(unsigned long node,
965 					  const char *uname,
966 					  void *data)
967 {
968 	int *count = data;
969 
970 	process_cpufeatures_node(node, uname, *count);
971 
972 	(*count)++;
973 
974 	return 0;
975 }
976 
977 static int __init count_cpufeatures_subnodes(unsigned long node,
978 					  const char *uname,
979 					  void *data)
980 {
981 	int *count = data;
982 
983 	(*count)++;
984 
985 	return 0;
986 }
987 
/*
 * Main scan callback: for the ibm,powerpc-cpu-features node, allocate
 * and fill dt_cpu_features, enable independent features then dependent
 * ones, pick up the CPU display name, and finish setup. The scratch
 * array is freed before returning.
 */
static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
					    *uname, int depth, void *data)
{
	const __be32 *prop;
	int count, i;
	u32 isa;

	/* We are scanning "ibm,powerpc-cpu-features" nodes only */
	if (!of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features"))
		return 0;

	prop = of_get_flat_dt_prop(node, "isa", NULL);
	if (!prop)
		/* We checked before, "can't happen" */
		return 0;

	isa = be32_to_cpup(prop);

	/* Count and allocate space for cpu features */
	of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes,
						&nr_dt_cpu_features);
	/* NOTE(review): memblock_alloc return is not checked — an early
	 * allocation failure here would be fatal anyway; confirm policy. */
	dt_cpu_features = __va(
		memblock_alloc(sizeof(struct dt_cpu_feature)*
				nr_dt_cpu_features, PAGE_SIZE));

	cpufeatures_setup_start(isa);

	/* Scan nodes into dt_cpu_features and enable those without deps  */
	count = 0;
	of_scan_flat_dt_subnodes(node, scan_cpufeatures_subnodes, &count);

	/* Recursive enable remaining features with dependencies */
	for (i = 0; i < nr_dt_cpu_features; i++) {
		struct dt_cpu_feature *f = &dt_cpu_features[i];

		cpufeatures_deps_enable(f);
	}

	prop = of_get_flat_dt_prop(node, "display-name", NULL);
	if (prop && strlen((char *)prop) != 0) {
		strlcpy(dt_cpu_name, (char *)prop, sizeof(dt_cpu_name));
		cur_cpu_spec->cpu_name = dt_cpu_name;
	}

	cpufeatures_setup_finished();

	memblock_free(__pa(dt_cpu_features),
			sizeof(struct dt_cpu_feature)*nr_dt_cpu_features);

	return 0;
}
1039 
1040 void __init dt_cpu_ftrs_scan(void)
1041 {
1042 	if (!using_dt_cpu_ftrs)
1043 		return;
1044 
1045 	of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL);
1046 }
1047