/*
 *  64-bit pSeries and RS/6000 setup code.
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Adapted from 'alpha' version by Gary Thomas
 *  Modified by Cort Dougan (cort@cs.nmt.edu)
 *  Modified by PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * bootup setup stuff..
 */

#undef DEBUG

#include <linux/config.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/tty.h>
#include <linux/major.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/adb.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>

#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/time.h>
#include <asm/nvram.h>
#include "xics.h"
#include <asm/firmware.h>
#include <asm/pmc.h>
#include <asm/mpic.h>
#include <asm/ppc-pci.h>
#include <asm/i8259.h>
#include <asm/udbg.h>
#include <asm/smp.h>

#include "plpar_wrappers.h"

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

extern void find_udbg_vterm(void);
extern void system_reset_fwnmi(void);	/* from head.S */
extern void machine_check_fwnmi(void);	/* from head.S */
extern void generic_find_legacy_serial_ports(u64 *physport,
		unsigned int *default_speed);

int fwnmi_active;  /* TRUE if an FWNMI handler is present */

extern void pSeries_system_reset_exception(struct pt_regs *regs);
extern int pSeries_machine_check_exception(struct pt_regs *regs);

static void pseries_shared_idle(void);
static void pseries_dedicated_idle(void);

struct mpic *pSeries_mpic;

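/*
 * Report the machine model string from the root node of the device tree
 * in /proc/cpuinfo.
 */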
void pSeries_show_cpuinfo(struct seq_file *m)
{
	struct device_node *root;
	const char *model = "";

	root = of_find_node_by_path("/");
	if (root)
		model = get_property(root, "model", NULL);
	seq_printf(m, "machine\t\t: CHRP %s\n", model);
	of_node_put(root);
}

/*
 * Initialize firmware-assisted non-maskable interrupts (FWNMI) if
 * the firmware supports this feature.
 */
static void __init fwnmi_init(void)
{
	int ret;
	int ibm_nmi_register = rtas_token("ibm,nmi-register");
	if (ibm_nmi_register == RTAS_UNKNOWN_SERVICE)
		return;
	ret = rtas_call(ibm_nmi_register, 2, 1, NULL,
			__pa((unsigned long)system_reset_fwnmi),
			__pa((unsigned long)machine_check_fwnmi));
	if (ret == 0)
		fwnmi_active = 1;
}

static void __init pSeries_init_mpic(void)
{
	unsigned int *addrp;
	struct device_node *np;
	unsigned long intack = 0;

	/* All ISUs are set up; complete the initialization */
	mpic_init(pSeries_mpic);

	/* Check what kind of cascade ACK we have */
	if (!(np = of_find_node_by_name(NULL, "pci"))
	    || !(addrp = (unsigned int *)
		 get_property(np, "8259-interrupt-acknowledge", NULL)))
		printk(KERN_ERR "Cannot find pci to get ack address\n");
	else
		intack = addrp[prom_n_addr_cells(np)-1];
	of_node_put(np);

	/* Setup the legacy interrupts & controller */
	i8259_init(intack, 0);

	/* Hook cascade to mpic */
	mpic_setup_cascade(NUM_ISA_INTERRUPTS, i8259_irq_cascade, NULL);
}

static void __init pSeries_setup_mpic(void)
{
	unsigned int *opprop;
	unsigned long openpic_addr = 0;
	unsigned char senses[NR_IRQS - NUM_ISA_INTERRUPTS];
	struct device_node *root;
	int irq_count;

	/* Find the Open PIC if present */
	root = of_find_node_by_path("/");
	opprop = (unsigned int *) get_property(root, "platform-open-pic", NULL);
	if (opprop != 0) {
		int n = prom_n_addr_cells(root);

		for (openpic_addr = 0; n > 0; --n)
			openpic_addr = (openpic_addr << 32) + *opprop++;
		printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr);
	}
	of_node_put(root);

	BUG_ON(openpic_addr == 0);

	/* Get the sense values from OF */
	prom_get_irq_senses(senses, NUM_ISA_INTERRUPTS, NR_IRQS);

	/* Setup the openpic driver */
	irq_count = NR_IRQS - NUM_ISA_INTERRUPTS - 4; /* leave room for IPIs */
	pSeries_mpic = mpic_alloc(openpic_addr, MPIC_PRIMARY,
				  16, 16, irq_count, /* isu size, irq offset, irq count */
				  NR_IRQS - 4, /* ipi offset */
				  senses, irq_count, /* sense & sense size */
				  " MPIC     ");
}

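/*
 * Enable the performance monitor counters when running under the
 * hypervisor.  The H_PERFMON hcall takes a "set" and a "reset" argument,
 * and on shared-processor LPARs the pmcregs_in_use flag in the lppaca
 * additionally tells the hypervisor to maintain the PMC state for this
 * partition.
 */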
static void pseries_lpar_enable_pmcs(void)
{
	unsigned long set, reset;

	power4_enable_pmcs();

	set = 1UL << 63;
	reset = 0;
	plpar_hcall_norets(H_PERFMON, set, reset);

	/* instruct hypervisor to maintain PMCs */
	if (firmware_has_feature(FW_FEATURE_SPLPAR))
		get_paca()->lppaca.pmcregs_in_use = 1;
}

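/*
 * Board setup: select the interrupt controller backend (MPIC or XICS),
 * initialize FWNMI, the PCI host bridges, EEH and NVRAM, and pick an
 * idle loop appropriate to the partition type.
 */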
static void __init pSeries_setup_arch(void)
{
	/* Fixup ppc_md depending on the type of interrupt controller */
	if (ppc64_interrupt_controller == IC_OPEN_PIC) {
		ppc_md.init_IRQ       = pSeries_init_mpic;
		ppc_md.get_irq        = mpic_get_irq;
		/* Allocate the mpic now, so that find_and_init_phbs() can
		 * fill the ISUs */
		pSeries_setup_mpic();
	} else {
		ppc_md.init_IRQ       = xics_init_IRQ;
		ppc_md.get_irq        = xics_get_irq;
	}

#ifdef CONFIG_SMP
	smp_init_pSeries();
#endif
	/* openpic global configuration register (64-bit format). */
	/* openpic Interrupt Source Unit pointer (64-bit format). */
	/* python0 facility area (mmio) (64-bit format) REAL address. */

	/* init to some ~sane value until calibrate_delay() runs */
	loops_per_jiffy = 50000000;

	if (ROOT_DEV == 0) {
		printk("No ramdisk, default root is /dev/sda2\n");
		ROOT_DEV = Root_SDA2;
	}

	fwnmi_init();

	/* Find and initialize PCI host bridges */
	init_pci_config_tokens();
	find_and_init_phbs();
	eeh_init();

	pSeries_nvram_init();

	/* Choose an idle loop */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		vpa_init(boot_cpuid);
		if (get_paca()->lppaca.shared_proc) {
			printk(KERN_INFO "Using shared processor idle loop\n");
			ppc_md.idle_loop = pseries_shared_idle;
		} else {
			printk(KERN_INFO "Using dedicated idle loop\n");
			ppc_md.idle_loop = pseries_dedicated_idle;
		}
	} else {
		printk(KERN_INFO "Using default idle loop\n");
		ppc_md.idle_loop = default_idle;
	}

	if (platform_is_lpar())
		ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
	else
		ppc_md.enable_pmcs = power4_enable_pmcs;
}

static int __init pSeries_init_panel(void)
{
	/* Manually leave the kernel version on the panel. */
	ppc_md.progress("Linux ppc64\n", 0);
	ppc_md.progress(system_utsname.version, 0);

	return 0;
}
arch_initcall(pSeries_init_panel);


/* Build up the ppc64_firmware_features bitmask field
 * using contents of device-tree/ibm,hypertas-functions.
 * Ultimately this functionality may be moved into prom.c prom_init().
 */
static void __init fw_feature_init(void)
{
	struct device_node *dn;
	char *hypertas;
	unsigned int len;

	DBG(" -> fw_feature_init()\n");

	ppc64_firmware_features = 0;
	dn = of_find_node_by_path("/rtas");
	if (dn == NULL) {
		printk(KERN_ERR "WARNING! Cannot find RTAS in device-tree!\n");
		goto no_rtas;
	}

	hypertas = get_property(dn, "ibm,hypertas-functions", &len);
	if (hypertas) {
		while (len > 0) {
			int i, hypertas_len;
			/* check value against table of strings */
			for (i = 0; i < FIRMWARE_MAX_FEATURES; i++) {
				if ((firmware_features_table[i].name) &&
				    (strcmp(firmware_features_table[i].name, hypertas)) == 0) {
					/* we have a match */
					ppc64_firmware_features |=
						(firmware_features_table[i].val);
					break;
				}
			}
			hypertas_len = strlen(hypertas);
			len -= hypertas_len + 1;
			hypertas += hypertas_len + 1;
		}
	}

	of_node_put(dn);
no_rtas:

	DBG(" <- fw_feature_init()\n");
}


static void __init pSeries_discover_pic(void)
{
	struct device_node *np;
	char *typep;

	/*
	 * Setup interrupt mapping options that are needed for finish_device_tree
	 * to properly parse the OF interrupt tree & do the virtual irq mapping
	 */
	__irq_offset_value = NUM_ISA_INTERRUPTS;
	ppc64_interrupt_controller = IC_INVALID;
	for (np = NULL; (np = of_find_node_by_name(np, "interrupt-controller"));) {
		typep = (char *)get_property(np, "compatible", NULL);
		if (typep && strstr(typep, "open-pic"))
			ppc64_interrupt_controller = IC_OPEN_PIC;
		else if (typep && strstr(typep, "ppc-xicp"))
			ppc64_interrupt_controller = IC_PPC_XIC;
		else
			printk("pSeries_discover_pic: failed to recognize"
			       " interrupt-controller\n");
		break;
	}
}

static void pSeries_mach_cpu_die(void)
{
	local_irq_disable();
	idle_task_exit();
	/*
	 * Some hardware requires clearing the CPPR, while other hardware
	 * does not; it is safe either way.
	 */
	pSeriesLP_cppr_info(0, 0);
	rtas_stop_self();
	/* Should never get here... */
	BUG();
	for(;;);
}

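/*
 * The DABR (data address breakpoint register) cannot be written directly
 * by the OS in an LPAR, so it is set through a hypervisor call.
 * H_SET_XDABR takes an extra extension-bits argument selecting which
 * privilege levels the breakpoint applies to.
 */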
static int pseries_set_dabr(unsigned long dabr)
{
	return plpar_hcall_norets(H_SET_DABR, dabr);
}

static int pseries_set_xdabr(unsigned long dabr)
{
	/* We want to catch accesses from kernel and userspace */
	return plpar_hcall_norets(H_SET_XDABR, dabr,
			H_DABRX_KERNEL | H_DABRX_USER);
}

/*
 * Early initialization.  Relocation is on but do not reference unbolted pages
 */
static void __init pSeries_init_early(void)
{
	void *comport;
	int iommu_off = 0;
	unsigned int default_speed;
	u64 physport;

	DBG(" -> pSeries_init_early()\n");

	fw_feature_init();

	if (platform_is_lpar())
		hpte_init_lpar();
	else {
		hpte_init_native();
		iommu_off = (of_chosen &&
			     get_property(of_chosen, "linux,iommu-off", NULL));
	}

	generic_find_legacy_serial_ports(&physport, &default_speed);

	if (platform_is_lpar())
		find_udbg_vterm();
	else if (physport) {
		/* Map the uart for udbg. */
		comport = (void *)ioremap(physport, 16);
		udbg_init_uart(comport, default_speed);

		DBG("Hello World !\n");
	}

	if (firmware_has_feature(FW_FEATURE_DABR))
		ppc_md.set_dabr = pseries_set_dabr;
	else if (firmware_has_feature(FW_FEATURE_XDABR))
		ppc_md.set_dabr = pseries_set_xdabr;

	iommu_init_early_pSeries();

	pSeries_discover_pic();

	DBG(" <- pSeries_init_early()\n");
}

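/*
 * Legacy ISA devices (keyboard controller, floppy controller) exist only
 * if the device tree says so; report -ENODEV for ports with no matching
 * node so the corresponding drivers do not poke at missing hardware.
 */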
static int pSeries_check_legacy_ioport(unsigned int baseport)
{
	struct device_node *np;

#define I8042_DATA_REG	0x60
#define FDC_BASE	0x3f0

	switch (baseport) {
	case I8042_DATA_REG:
		np = of_find_node_by_type(NULL, "8042");
		if (np == NULL)
			return -ENODEV;
		of_node_put(np);
		break;
	case FDC_BASE:
		np = of_find_node_by_type(NULL, "fdc");
		if (np == NULL)
			return -ENODEV;
		of_node_put(np);
		break;
	}
	return 0;
}

/*
 * Called very early, MMU is off, device-tree isn't unflattened
 */
extern struct machdep_calls pSeries_md;

static int __init pSeries_probe(int platform)
{
	if (platform != PLATFORM_PSERIES &&
	    platform != PLATFORM_PSERIES_LPAR)
		return 0;

	/* if we have some ppc_md fixups for LPAR to do, do
	 * it here ...
	 */

	return 1;
}

DECLARE_PER_CPU(unsigned long, smt_snooze_delay);

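/*
 * Put this hardware thread to sleep while its SMT sibling still has work.
 * With two threads per core, "cpu ^ 1" indexes the partner thread's paca,
 * whose lppaca.idle flag tells us whether the sibling is busy.
 */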
static inline void dedicated_idle_sleep(unsigned int cpu)
{
	struct paca_struct *ppaca = &paca[cpu ^ 1];

	/* Only sleep if the other thread is not idle */
	if (!(ppaca->lppaca.idle)) {
		local_irq_disable();

		/*
		 * We are about to sleep the thread and so won't be polling
		 * any more.
		 */
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb__after_clear_bit();

		/*
		 * SMT dynamic mode. Cede will result in this thread going
		 * dormant, if the partner thread is still doing work.  Thread
		 * wakes up if partner goes idle, an interrupt is presented, or
		 * a prod occurs.  Returning from the cede enables external
		 * interrupts.
		 */
		if (!need_resched())
			cede_processor();
		else
			local_irq_enable();
		set_thread_flag(TIF_POLLING_NRFLAG);
	} else {
		/*
		 * Give the HV an opportunity at the processor, since we are
		 * not doing any work.
		 */
		poll_pending();
	}
}

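/*
 * Idle loop for a dedicated-processor partition: spin at low thread
 * priority polling for work, and only once smt_snooze_delay microseconds
 * have elapsed consider putting the thread to sleep via
 * dedicated_idle_sleep().
 */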
static void pseries_dedicated_idle(void)
{
	struct paca_struct *lpaca = get_paca();
	unsigned int cpu = smp_processor_id();
	unsigned long start_snooze;
	unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);

	set_thread_flag(TIF_POLLING_NRFLAG);

	while (1) {
		/*
		 * Indicate to the HV that we are idle. Now would be
		 * a good time to find other work to dispatch.
		 */
		lpaca->lppaca.idle = 1;

		if (!need_resched()) {
			start_snooze = get_tb() +
				*smt_snooze_delay * tb_ticks_per_usec;

			while (!need_resched() && !cpu_is_offline(cpu)) {
				ppc64_runlatch_off();

				/*
				 * Go into low thread priority and possibly
				 * low power mode.
				 */
				HMT_low();
				HMT_very_low();

				if (*smt_snooze_delay != 0 &&
				    get_tb() > start_snooze) {
					HMT_medium();
					dedicated_idle_sleep(cpu);
				}
			}

			HMT_medium();
		}

		lpaca->lppaca.idle = 0;
		ppc64_runlatch_on();

		preempt_enable_no_resched();
		schedule();
		preempt_disable();

		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
			cpu_die();
	}
}

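/*
 * Idle loop for a shared-processor partition: there is little point in
 * spinning, so cede the virtual processor back to the hypervisor as soon
 * as there is nothing to run.
 */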
static void pseries_shared_idle(void)
{
	struct paca_struct *lpaca = get_paca();
	unsigned int cpu = smp_processor_id();

	while (1) {
		/*
		 * Indicate to the HV that we are idle. Now would be
		 * a good time to find other work to dispatch.
		 */
		lpaca->lppaca.idle = 1;

		while (!need_resched() && !cpu_is_offline(cpu)) {
			local_irq_disable();
			ppc64_runlatch_off();

			/*
			 * Yield the processor to the hypervisor.  We return if
			 * an external interrupt occurs (which are driven prior
			 * to returning here) or if a prod occurs from another
			 * processor. When returning here, external interrupts
			 * are enabled.
			 *
			 * Check need_resched() again with interrupts disabled
			 * to avoid a race.
			 */
			if (!need_resched())
				cede_processor();
			else
				local_irq_enable();

			HMT_medium();
		}

		lpaca->lppaca.idle = 0;
		ppc64_runlatch_on();

		preempt_enable_no_resched();
		schedule();
		preempt_disable();

		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
			cpu_die();
	}
}

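/*
 * Under an LPAR the PCI hierarchy is described by the firmware-provided
 * device tree, so devices are enumerated from it rather than probed on
 * the bus directly.
 */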
static int pSeries_pci_probe_mode(struct pci_bus *bus)
{
	if (platform_is_lpar())
		return PCI_PROBE_DEVTREE;
	return PCI_PROBE_NORMAL;
}

#ifdef CONFIG_KEXEC
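/*
 * Quiesce this CPU before a kexec: deregister its VPA with the
 * hypervisor (unless we are crashing, when a hypervisor call is too
 * risky) and tear down its interrupt controller state.
 */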
static void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
{
	/* Don't risk a hypervisor call if we're crashing */
	if (!crash_shutdown) {
		unsigned long vpa = __pa(&get_paca()->lppaca);

		if (unregister_vpa(hard_smp_processor_id(), vpa)) {
			printk("VPA deregistration of cpu %u (hw_cpu_id %d) "
					"failed\n", smp_processor_id(),
					hard_smp_processor_id());
		}
	}

	if (ppc64_interrupt_controller == IC_OPEN_PIC)
		mpic_teardown_this_cpu(secondary);
	else
		xics_teardown_cpu(secondary);
}
#endif

struct machdep_calls __initdata pSeries_md = {
	.probe			= pSeries_probe,
	.setup_arch		= pSeries_setup_arch,
	.init_early		= pSeries_init_early,
	.show_cpuinfo		= pSeries_show_cpuinfo,
	.log_error		= pSeries_log_error,
	.pcibios_fixup		= pSeries_final_fixup,
	.pci_probe_mode		= pSeries_pci_probe_mode,
	.irq_bus_setup		= pSeries_irq_bus_setup,
	.restart		= rtas_restart,
	.power_off		= rtas_power_off,
	.halt			= rtas_halt,
	.panic			= rtas_os_term,
	.cpu_die		= pSeries_mach_cpu_die,
	.get_boot_time		= rtas_get_boot_time,
	.get_rtc_time		= rtas_get_rtc_time,
	.set_rtc_time		= rtas_set_rtc_time,
	.calibrate_decr		= generic_calibrate_decr,
	.progress		= rtas_progress,
	.check_legacy_ioport	= pSeries_check_legacy_ioport,
	.system_reset_exception = pSeries_system_reset_exception,
	.machine_check_exception = pSeries_machine_check_exception,
#ifdef CONFIG_KEXEC
	.kexec_cpu_down		= pseries_kexec_cpu_down,
#endif
};
643