xref: /linux/arch/powerpc/platforms/pseries/setup.c (revision de2fe5e07d58424bc286fff3fd3c1b0bf933cd58)
/*
 *  64-bit pSeries and RS/6000 setup code.
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Adapted from 'alpha' version by Gary Thomas
 *  Modified by Cort Dougan (cort@cs.nmt.edu)
 *  Modified by PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * bootup setup stuff..
 */

#undef DEBUG

#include <linux/config.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/tty.h>
#include <linux/major.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/adb.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>

#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/kexec.h>
#include <asm/time.h>
#include <asm/nvram.h>
#include "xics.h"
#include <asm/pmc.h>
#include <asm/mpic.h>
#include <asm/ppc-pci.h>
#include <asm/i8259.h>
#include <asm/udbg.h>
#include <asm/smp.h>

#include "plpar_wrappers.h"
#include "ras.h"
#include "firmware.h"

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

extern void find_udbg_vterm(void);

int fwnmi_active;  /* TRUE if an FWNMI handler is present */

static void pseries_shared_idle_sleep(void);
static void pseries_dedicated_idle_sleep(void);

struct mpic *pSeries_mpic;

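/* Report the machine model from the device tree root node in /proc/cpuinfo. */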
static void pSeries_show_cpuinfo(struct seq_file *m)
{
	struct device_node *root;
	const char *model = "";

	root = of_find_node_by_path("/");
	if (root)
		model = get_property(root, "model", NULL);
	seq_printf(m, "machine\t\t: CHRP %s\n", model);
	of_node_put(root);
}

/* Initialize firmware-assisted non-maskable interrupts if
 * the firmware supports this feature.
 */
static void __init fwnmi_init(void)
{
	unsigned long system_reset_addr, machine_check_addr;

	int ibm_nmi_register = rtas_token("ibm,nmi-register");
	if (ibm_nmi_register == RTAS_UNKNOWN_SERVICE)
		return;

	/* If the kernel's not linked at zero we point the firmware at low
	 * addresses anyway, and use a trampoline to get to the real code. */
	system_reset_addr  = __pa(system_reset_fwnmi) - PHYSICAL_START;
	machine_check_addr = __pa(machine_check_fwnmi) - PHYSICAL_START;

	if (0 == rtas_call(ibm_nmi_register, 2, 1, NULL, system_reset_addr,
				machine_check_addr))
		fwnmi_active = 1;
}

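/*
 * Finish MPIC initialisation once the ISUs have been filled in, then wire
 * the legacy i8259 up as a cascade.  The 8259 interrupt-acknowledge
 * address is taken from the first "pci" node in the device tree.
 */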
static void __init pSeries_init_mpic(void)
{
	unsigned int *addrp;
	struct device_node *np;
	unsigned long intack = 0;

	/* All ISUs are setup, complete initialization */
	mpic_init(pSeries_mpic);

	/* Check what kind of cascade ACK we have */
	if (!(np = of_find_node_by_name(NULL, "pci"))
	    || !(addrp = (unsigned int *)
		 get_property(np, "8259-interrupt-acknowledge", NULL)))
		printk(KERN_ERR "Cannot find pci to get ack address\n");
	else
		intack = addrp[prom_n_addr_cells(np)-1];
	of_node_put(np);

	/* Setup the legacy interrupts & controller */
	i8259_init(intack, 0);

	/* Hook cascade to mpic */
	mpic_setup_cascade(NUM_ISA_INTERRUPTS, i8259_irq_cascade, NULL);
}

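/*
 * Locate the OpenPIC through the "platform-open-pic" property of the
 * root node and allocate the MPIC for it, leaving the last four vectors
 * free for inter-processor interrupts.
 */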
static void __init pSeries_setup_mpic(void)
{
	unsigned int *opprop;
	unsigned long openpic_addr = 0;
	unsigned char senses[NR_IRQS - NUM_ISA_INTERRUPTS];
	struct device_node *root;
	int irq_count;

	/* Find the Open PIC if present */
	root = of_find_node_by_path("/");
	opprop = (unsigned int *) get_property(root, "platform-open-pic", NULL);
	if (opprop != NULL) {
		int n = prom_n_addr_cells(root);

		for (openpic_addr = 0; n > 0; --n)
			openpic_addr = (openpic_addr << 32) + *opprop++;
		printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr);
	}
	of_node_put(root);

	BUG_ON(openpic_addr == 0);

	/* Get the sense values from OF */
	prom_get_irq_senses(senses, NUM_ISA_INTERRUPTS, NR_IRQS);

	/* Setup the openpic driver */
	irq_count = NR_IRQS - NUM_ISA_INTERRUPTS - 4; /* leave room for IPIs */
	pSeries_mpic = mpic_alloc(openpic_addr, MPIC_PRIMARY,
				  16, 16, irq_count, /* isu size, irq offset, irq count */
				  NR_IRQS - 4, /* ipi offset */
				  senses, irq_count, /* sense & sense size */
				  " MPIC     ");
}

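/*
 * Enable the performance monitor counters, ask the hypervisor to enable
 * performance monitoring via H_PERFMON, and on shared-processor LPARs
 * tell it to maintain the PMC registers for this partition.
 */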
static void pseries_lpar_enable_pmcs(void)
{
	unsigned long set, reset;

	power4_enable_pmcs();

	set = 1UL << 63;
	reset = 0;
	plpar_hcall_norets(H_PERFMON, set, reset);

	/* instruct hypervisor to maintain PMCs */
	if (firmware_has_feature(FW_FEATURE_SPLPAR))
		get_lppaca()->pmcregs_in_use = 1;
}

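/*
 * Main platform setup: select MPIC or XICS interrupt handling, set up
 * SMP, FWNMI, the PCI host bridges, EEH and NVRAM, and choose an idle
 * loop suited to shared-processor or dedicated-processor partitions.
 */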
static void __init pSeries_setup_arch(void)
{
	/* Fixup ppc_md depending on the type of interrupt controller */
	if (ppc64_interrupt_controller == IC_OPEN_PIC) {
		ppc_md.init_IRQ       = pSeries_init_mpic;
		ppc_md.get_irq        = mpic_get_irq;
		/* Allocate the mpic now, so that find_and_init_phbs() can
		 * fill the ISUs */
		pSeries_setup_mpic();
	} else {
		ppc_md.init_IRQ       = xics_init_IRQ;
		ppc_md.get_irq        = xics_get_irq;
	}

#ifdef CONFIG_SMP
	smp_init_pSeries();
#endif
	/* openpic global configuration register (64-bit format). */
	/* openpic Interrupt Source Unit pointer (64-bit format). */
	/* python0 facility area (mmio) (64-bit format) REAL address. */

	/* init to some ~sane value until calibrate_delay() runs */
	loops_per_jiffy = 50000000;

	if (ROOT_DEV == 0) {
		printk("No ramdisk, default root is /dev/sda2\n");
		ROOT_DEV = Root_SDA2;
	}

	fwnmi_init();

	/* Find and initialize PCI host bridges */
	init_pci_config_tokens();
	find_and_init_phbs();
	eeh_init();

	pSeries_nvram_init();

	/* Choose an idle loop */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		vpa_init(boot_cpuid);
		if (get_lppaca()->shared_proc) {
			printk(KERN_INFO "Using shared processor idle loop\n");
			ppc_md.power_save = pseries_shared_idle_sleep;
		} else {
			printk(KERN_INFO "Using dedicated idle loop\n");
			ppc_md.power_save = pseries_dedicated_idle_sleep;
		}
	} else {
		printk(KERN_INFO "Using default idle loop\n");
	}

	if (firmware_has_feature(FW_FEATURE_LPAR))
		ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
	else
		ppc_md.enable_pmcs = power4_enable_pmcs;
}

static int __init pSeries_init_panel(void)
{
	/* Manually leave the kernel version on the panel. */
	ppc_md.progress("Linux ppc64\n", 0);
	ppc_md.progress(system_utsname.version, 0);

	return 0;
}
arch_initcall(pSeries_init_panel);

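/*
 * Look at the "interrupt-controller" nodes to decide whether this machine
 * uses an OpenPIC or the XICS interrupt controller, recording the answer
 * in ppc64_interrupt_controller for later use by pSeries_setup_arch().
 */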
static void __init pSeries_discover_pic(void)
{
	struct device_node *np;
	char *typep;

	/*
	 * Setup interrupt mapping options that are needed for finish_device_tree
	 * to properly parse the OF interrupt tree & do the virtual irq mapping
	 */
	__irq_offset_value = NUM_ISA_INTERRUPTS;
	ppc64_interrupt_controller = IC_INVALID;
	for (np = NULL; (np = of_find_node_by_name(np, "interrupt-controller"));) {
		typep = (char *)get_property(np, "compatible", NULL);
		if (typep == NULL)
			continue;	/* skip nodes without a compatible property */
		if (strstr(typep, "open-pic")) {
			ppc64_interrupt_controller = IC_OPEN_PIC;
			break;
		} else if (strstr(typep, "ppc-xicp")) {
			ppc64_interrupt_controller = IC_PPC_XIC;
			break;
		}
	}
	if (ppc64_interrupt_controller == IC_INVALID)
		printk("pSeries_discover_pic: failed to recognize"
			" interrupt-controller\n");
}

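/*
 * Run on a CPU being offlined: ask RTAS to stop this processor.  This
 * function does not return.
 */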
static void pSeries_mach_cpu_die(void)
{
	local_irq_disable();
	idle_task_exit();
	/* Some hardware requires clearing the CPPR, while other hardware
	 * does not; it is safe either way.
	 */
	pSeriesLP_cppr_info(0, 0);
	rtas_stop_self();
	/* Should never get here... */
	BUG();
	for (;;);
}

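/*
 * Set the data address breakpoint register through the hypervisor.
 * Firmware advertises either H_SET_DABR or the extended H_SET_XDABR
 * call; pSeries_init_early() installs whichever helper matches.
 */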
static int pseries_set_dabr(unsigned long dabr)
{
	return plpar_hcall_norets(H_SET_DABR, dabr);
}

static int pseries_set_xdabr(unsigned long dabr)
{
	/* We want to catch accesses from kernel and userspace */
	return plpar_hcall_norets(H_SET_XDABR, dabr,
			H_DABRX_KERNEL | H_DABRX_USER);
}

/*
 * Early initialization.  Relocation is on but do not reference unbolted pages
 */
static void __init pSeries_init_early(void)
{
	DBG(" -> pSeries_init_early()\n");

	fw_feature_init();

	if (firmware_has_feature(FW_FEATURE_LPAR))
		hpte_init_lpar();
	else
		hpte_init_native();

	if (firmware_has_feature(FW_FEATURE_LPAR))
		find_udbg_vterm();

	if (firmware_has_feature(FW_FEATURE_DABR))
		ppc_md.set_dabr = pseries_set_dabr;
	else if (firmware_has_feature(FW_FEATURE_XDABR))
		ppc_md.set_dabr = pseries_set_xdabr;

	iommu_init_early_pSeries();

	pSeries_discover_pic();

	DBG(" <- pSeries_init_early()\n");
}

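/*
 * Report the keyboard controller (8042) and floppy controller legacy
 * ports as absent unless a matching device tree node exists.
 */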
static int pSeries_check_legacy_ioport(unsigned int baseport)
{
	struct device_node *np;

#define I8042_DATA_REG	0x60
#define FDC_BASE	0x3f0

	switch (baseport) {
	case I8042_DATA_REG:
		np = of_find_node_by_type(NULL, "8042");
		if (np == NULL)
			return -ENODEV;
		of_node_put(np);
		break;
	case FDC_BASE:
		np = of_find_node_by_type(NULL, "fdc");
		if (np == NULL)
			return -ENODEV;
		of_node_put(np);
		break;
	}
	return 0;
}

/*
 * Called very early; the MMU is off and the device tree has not been
 * unflattened yet.
 */

static int __init pSeries_probe_hypertas(unsigned long node,
					 const char *uname, int depth,
					 void *data)
{
	if (depth != 1 ||
	    (strcmp(uname, "rtas") != 0 && strcmp(uname, "rtas@0") != 0))
		return 0;

	if (of_get_flat_dt_prop(node, "ibm,hypertas-functions", NULL) != NULL)
		powerpc_firmware_features |= FW_FEATURE_LPAR;

	return 1;
}

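/*
 * Platform probe: accept machines whose root node has device_type "chrp",
 * then scan the flattened device tree for ibm,hypertas-functions to work
 * out whether we are running inside an LPAR.
 */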
static int __init pSeries_probe(void)
{
	char *dtype = of_get_flat_dt_prop(of_get_flat_dt_root(),
					  "device_type", NULL);
	if (dtype == NULL)
		return 0;
	if (strcmp(dtype, "chrp"))
		return 0;

	DBG("pSeries detected, looking for LPAR capability...\n");

	/* Now try to figure out if we are running on LPAR */
	of_scan_flat_dt(pSeries_probe_hypertas, NULL);

	DBG("Machine is%s LPAR !\n",
	    (powerpc_firmware_features & FW_FEATURE_LPAR) ? "" : " not");

	return 1;
}

DECLARE_PER_CPU(unsigned long, smt_snooze_delay);

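/*
 * Idle loop for dedicated-processor partitions: spin at low priority for
 * up to smt_snooze_delay microseconds, then cede the processor to the
 * hypervisor when that looks worthwhile.
 */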
static void pseries_dedicated_idle_sleep(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned long start_snooze;
	unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);

	/*
	 * Indicate to the HV that we are idle. Now would be
	 * a good time to find other work to dispatch.
	 */
	get_lppaca()->idle = 1;

	/*
	 * We come in with interrupts disabled, and need_resched()
	 * has been checked recently.  If we should poll for a little
	 * while, do so.
	 */
	if (*smt_snooze_delay) {
		start_snooze = get_tb() +
			*smt_snooze_delay * tb_ticks_per_usec;
		local_irq_enable();
		set_thread_flag(TIF_POLLING_NRFLAG);

		while (get_tb() < start_snooze) {
			if (need_resched() || cpu_is_offline(cpu))
				goto out;
			ppc64_runlatch_off();
			HMT_low();
			HMT_very_low();
		}

		HMT_medium();
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb();
		local_irq_disable();
		if (need_resched() || cpu_is_offline(cpu))
			goto out;
	}

	/*
	 * Cede if the other thread is not idle, so that it can
	 * go single-threaded.  If the other thread is idle,
	 * we ask the hypervisor if it has pending work it
	 * wants to do and cede if it does.  Otherwise we keep
	 * polling in order to reduce interrupt latency.
	 *
	 * Doing the cede when the other thread is active will
	 * result in this thread going dormant, meaning the other
	 * thread gets to run in single-threaded (ST) mode, which
	 * is slightly faster than SMT mode with this thread at
	 * very low priority.  The cede enables interrupts, which
	 * doesn't matter here.
	 */
	if (!lppaca[cpu ^ 1].idle || poll_pending() == H_Pending)
		cede_processor();

out:
	HMT_medium();
	get_lppaca()->idle = 0;
}

static void pseries_shared_idle_sleep(void)
{
	/*
	 * Indicate to the HV that we are idle. Now would be
	 * a good time to find other work to dispatch.
	 */
	get_lppaca()->idle = 1;

	/*
	 * Yield the processor to the hypervisor.  We return if
	 * an external interrupt occurs (external interrupts are
	 * driven prior to returning here) or if a prod occurs
	 * from another processor.  When returning here, external
	 * interrupts are enabled.
	 */
	cede_processor();

	get_lppaca()->idle = 0;
}

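/*
 * Probe PCI devices from the device tree when running under an LPAR;
 * otherwise scan config space normally.
 */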
static int pSeries_pci_probe_mode(struct pci_bus *bus)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		return PCI_PROBE_DEVTREE;
	return PCI_PROBE_NORMAL;
}

#ifdef CONFIG_KEXEC
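/*
 * Quiesce this CPU before a kexec: deregister its VPA with the hypervisor
 * (unless we are crashing) and tear down its MPIC or XICS state so the new
 * kernel starts cleanly.
 */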
static void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
{
	/* Don't risk a hypervisor call if we're crashing */
	if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) {
		unsigned long vpa = __pa(get_lppaca());

		if (unregister_vpa(hard_smp_processor_id(), vpa)) {
			printk(KERN_ERR "VPA deregistration of cpu %u (hw_cpu_id %d) "
					"failed\n", smp_processor_id(),
					hard_smp_processor_id());
		}
	}

	if (ppc64_interrupt_controller == IC_OPEN_PIC)
		mpic_teardown_this_cpu(secondary);
	else
		xics_teardown_cpu(secondary);
}
#endif

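/* The machdep hooks that tie the pSeries platform into the generic powerpc code. */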
define_machine(pseries) {
	.name			= "pSeries",
	.probe			= pSeries_probe,
	.setup_arch		= pSeries_setup_arch,
	.init_early		= pSeries_init_early,
	.show_cpuinfo		= pSeries_show_cpuinfo,
	.log_error		= pSeries_log_error,
	.pcibios_fixup		= pSeries_final_fixup,
	.pci_probe_mode		= pSeries_pci_probe_mode,
	.irq_bus_setup		= pSeries_irq_bus_setup,
	.restart		= rtas_restart,
	.power_off		= rtas_power_off,
	.halt			= rtas_halt,
	.panic			= rtas_os_term,
	.cpu_die		= pSeries_mach_cpu_die,
	.get_boot_time		= rtas_get_boot_time,
	.get_rtc_time		= rtas_get_rtc_time,
	.set_rtc_time		= rtas_set_rtc_time,
	.calibrate_decr		= generic_calibrate_decr,
	.progress		= rtas_progress,
	.check_legacy_ioport	= pSeries_check_legacy_ioport,
	.system_reset_exception = pSeries_system_reset_exception,
	.machine_check_exception = pSeries_machine_check_exception,
#ifdef CONFIG_KEXEC
	.kexec_cpu_down		= pseries_kexec_cpu_down,
	.machine_kexec		= default_machine_kexec,
	.machine_kexec_prepare	= default_machine_kexec_prepare,
	.machine_crash_shutdown	= default_machine_crash_shutdown,
#endif
};