xref: /linux/arch/powerpc/kernel/setup-common.c (revision 5e8d780d745c1619aba81fe7166c5a4b5cad2b84)
1 /*
2  * Common boot and setup code for both 32-bit and 64-bit.
3  * Extracted from arch/powerpc/kernel/setup_64.c.
4  *
5  * Copyright (C) 2001 PPC64 Team, IBM Corp
6  *
7  *      This program is free software; you can redistribute it and/or
8  *      modify it under the terms of the GNU General Public License
9  *      as published by the Free Software Foundation; either version
10  *      2 of the License, or (at your option) any later version.
11  */
12 
13 #undef DEBUG
14 
15 #include <linux/config.h>
16 #include <linux/module.h>
17 #include <linux/string.h>
18 #include <linux/sched.h>
19 #include <linux/init.h>
20 #include <linux/kernel.h>
21 #include <linux/reboot.h>
22 #include <linux/delay.h>
23 #include <linux/initrd.h>
24 #include <linux/platform_device.h>
25 #include <linux/ide.h>
26 #include <linux/seq_file.h>
27 #include <linux/ioport.h>
28 #include <linux/console.h>
29 #include <linux/utsname.h>
30 #include <linux/tty.h>
31 #include <linux/root_dev.h>
32 #include <linux/notifier.h>
33 #include <linux/cpu.h>
34 #include <linux/unistd.h>
35 #include <linux/serial.h>
36 #include <linux/serial_8250.h>
37 #include <asm/io.h>
38 #include <asm/prom.h>
39 #include <asm/processor.h>
40 #include <asm/vdso_datapage.h>
41 #include <asm/pgtable.h>
42 #include <asm/smp.h>
43 #include <asm/elf.h>
44 #include <asm/machdep.h>
45 #include <asm/time.h>
46 #include <asm/cputable.h>
47 #include <asm/sections.h>
48 #include <asm/firmware.h>
49 #include <asm/btext.h>
50 #include <asm/nvram.h>
51 #include <asm/setup.h>
52 #include <asm/system.h>
53 #include <asm/rtas.h>
54 #include <asm/iommu.h>
55 #include <asm/serial.h>
56 #include <asm/cache.h>
57 #include <asm/page.h>
58 #include <asm/mmu.h>
59 #include <asm/lmb.h>
60 #include <asm/xmon.h>
61 
62 #include "setup.h"
63 
64 #ifdef DEBUG
65 #include <asm/udbg.h>
66 #define DBG(fmt...) udbg_printf(fmt)
67 #else
68 #define DBG(fmt...)
69 #endif
70 
71 /* The main machine-dep calls structure
72  */
73 struct machdep_calls ppc_md;
74 EXPORT_SYMBOL(ppc_md);
75 struct machdep_calls *machine_id;
76 EXPORT_SYMBOL(machine_id);
77 
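/*
 * klimit marks the first address past the kernel image (the _end linker
 * symbol); early boot code treats memory below this as already in use.
 */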
78 unsigned long klimit = (unsigned long) _end;
79 
80 /*
81  * This still seems to be needed... -- paulus
82  */
83 struct screen_info screen_info = {
84 	.orig_x = 0,
85 	.orig_y = 25,
86 	.orig_video_cols = 80,
87 	.orig_video_lines = 25,
88 	.orig_video_isVGA = 1,
89 	.orig_video_points = 16
90 };
91 
92 #ifdef __DO_IRQ_CANON
93 /* XXX should go elsewhere eventually */
94 int ppc_do_canonicalize_irqs;
95 EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
96 #endif
97 
98 /* also used by kexec */
99 void machine_shutdown(void)
100 {
101 	if (ppc_md.machine_shutdown)
102 		ppc_md.machine_shutdown();
103 }
104 
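/*
 * machine_restart(), machine_power_off() and machine_halt() below are
 * the generic reboot/halt entry points.  Each runs the optional
 * machine_shutdown() hook, then defers to the corresponding ppc_md
 * callback if the platform provides one; if that callback is missing
 * or returns, we stop the other CPUs and spin with interrupts off.
 */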
105 void machine_restart(char *cmd)
106 {
107 	machine_shutdown();
108 	if (ppc_md.restart)
109 		ppc_md.restart(cmd);
110 #ifdef CONFIG_SMP
111 	smp_send_stop();
112 #endif
113 	printk(KERN_EMERG "System Halted, OK to turn off power\n");
114 	local_irq_disable();
115 	while (1) ;
116 }
117 
118 void machine_power_off(void)
119 {
120 	machine_shutdown();
121 	if (ppc_md.power_off)
122 		ppc_md.power_off();
123 #ifdef CONFIG_SMP
124 	smp_send_stop();
125 #endif
126 	printk(KERN_EMERG "System Halted, OK to turn off power\n");
127 	local_irq_disable();
128 	while (1) ;
129 }
130 /* Used by the G5 thermal driver */
131 EXPORT_SYMBOL_GPL(machine_power_off);
132 
133 void (*pm_power_off)(void) = machine_power_off;
134 EXPORT_SYMBOL_GPL(pm_power_off);
135 
136 void machine_halt(void)
137 {
138 	machine_shutdown();
139 	if (ppc_md.halt)
140 		ppc_md.halt();
141 #ifdef CONFIG_SMP
142 	smp_send_stop();
143 #endif
144 	printk(KERN_EMERG "System Halted, OK to turn off power\n");
145 	local_irq_disable();
146 	while (1) ;
147 }
148 
149 
150 #ifdef CONFIG_TAU
151 extern u32 cpu_temp(unsigned long cpu);
152 extern u32 cpu_temp_both(unsigned long cpu);
153 #endif /* CONFIG_TAU */
154 
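/*
 * On SMP each CPU records its Processor Version Register during secondary
 * startup (see the SMP bringup code, e.g. smp_store_cpu_info()), so that
 * show_cpuinfo() can report per-CPU PVR values without having to run on
 * the CPU in question.
 */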
155 #ifdef CONFIG_SMP
156 DEFINE_PER_CPU(unsigned int, pvr);
157 #endif
158 
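/*
 * seq_file "show" callback for /proc/cpuinfo.  Positions 0..NR_CPUS-1
 * describe individual (online) CPUs; the extra position equal to NR_CPUS
 * produces the summary block (total bogomips, timebase, platform name).
 */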
159 static int show_cpuinfo(struct seq_file *m, void *v)
160 {
161 	unsigned long cpu_id = (unsigned long)v - 1;
162 	unsigned int pvr;
163 	unsigned short maj;
164 	unsigned short min;
165 
166 	if (cpu_id == NR_CPUS) {
167 #if defined(CONFIG_SMP) && defined(CONFIG_PPC32)
168 		unsigned long bogosum = 0;
169 		int i;
170 		for_each_online_cpu(i)
171 			bogosum += loops_per_jiffy;
172 		seq_printf(m, "total bogomips\t: %lu.%02lu\n",
173 			   bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
174 #endif /* CONFIG_SMP && CONFIG_PPC32 */
175 		seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
176 		if (ppc_md.name)
177 			seq_printf(m, "platform\t: %s\n", ppc_md.name);
178 		if (ppc_md.show_cpuinfo != NULL)
179 			ppc_md.show_cpuinfo(m);
180 
181 		return 0;
182 	}
183 
184 	/* We only show online cpus: disable preempt (overzealous, I
185 	 * know) to prevent the cpu from going down. */
186 	preempt_disable();
187 	if (!cpu_online(cpu_id)) {
188 		preempt_enable();
189 		return 0;
190 	}
191 
192 #ifdef CONFIG_SMP
193 	pvr = per_cpu(pvr, cpu_id);
194 #else
195 	pvr = mfspr(SPRN_PVR);
196 #endif
197 	maj = (pvr >> 8) & 0xFF;
198 	min = pvr & 0xFF;
199 
200 	seq_printf(m, "processor\t: %lu\n", cpu_id);
201 	seq_printf(m, "cpu\t\t: ");
202 
203 	if (cur_cpu_spec->pvr_mask)
204 		seq_printf(m, "%s", cur_cpu_spec->cpu_name);
205 	else
206 		seq_printf(m, "unknown (%08x)", pvr);
207 
208 #ifdef CONFIG_ALTIVEC
209 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
210 		seq_printf(m, ", altivec supported");
211 #endif /* CONFIG_ALTIVEC */
212 
213 	seq_printf(m, "\n");
214 
215 #ifdef CONFIG_TAU
216 	if (cur_cpu_spec->cpu_features & CPU_FTR_TAU) {
217 #ifdef CONFIG_TAU_AVERAGE
218 		/* more straightforward, but potentially misleading */
219 		seq_printf(m,  "temperature \t: %u C (uncalibrated)\n",
220 			   cpu_temp(cpu_id));
221 #else
222 		/* show the actual temp sensor range */
223 		u32 temp;
224 		temp = cpu_temp_both(cpu_id);
225 		seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n",
226 			   temp & 0xff, temp >> 16);
227 #endif
228 	}
229 #endif /* CONFIG_TAU */
230 
231 	/*
232 	 * Assume here that all clock rates are the same in an
233 	 * SMP system.  -- Cort
234 	 */
235 	if (ppc_proc_freq)
236 		seq_printf(m, "clock\t\t: %lu.%06luMHz\n",
237 			   ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
238 
239 	if (ppc_md.show_percpuinfo != NULL)
240 		ppc_md.show_percpuinfo(m, cpu_id);
241 
242 	/* If we are a Freescale core, do a simple check so
243 	 * we don't have to keep adding cases in the future. */
244 	if (PVR_VER(pvr) & 0x8000) {
245 		maj = PVR_MAJ(pvr);
246 		min = PVR_MIN(pvr);
247 	} else {
248 		switch (PVR_VER(pvr)) {
249 			case 0x0020:	/* 403 family */
250 				maj = PVR_MAJ(pvr) + 1;
251 				min = PVR_MIN(pvr);
252 				break;
253 			case 0x1008:	/* 740P/750P ?? */
254 				maj = ((pvr >> 8) & 0xFF) - 1;
255 				min = pvr & 0xFF;
256 				break;
257 			default:
258 				maj = (pvr >> 8) & 0xFF;
259 				min = pvr & 0xFF;
260 				break;
261 		}
262 	}
263 
264 	seq_printf(m, "revision\t: %hd.%hd (pvr %04x %04x)\n",
265 		   maj, min, PVR_VER(pvr), PVR_REV(pvr));
266 
267 #ifdef CONFIG_PPC32
268 	seq_printf(m, "bogomips\t: %lu.%02lu\n",
269 		   loops_per_jiffy / (500000/HZ),
270 		   (loops_per_jiffy / (5000/HZ)) % 100);
271 #endif
272 
273 #ifdef CONFIG_SMP
274 	seq_printf(m, "\n");
275 #endif
276 
277 	preempt_enable();
278 	return 0;
279 }
280 
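/*
 * seq_file iterator for /proc/cpuinfo.  The position is handed back
 * biased by one so that NULL can still mean "end of file"; show_cpuinfo()
 * subtracts one again to recover the cpu id, with the final position
 * mapping to the summary record above.
 */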
281 static void *c_start(struct seq_file *m, loff_t *pos)
282 {
283 	unsigned long i = *pos;
284 
285 	return i <= NR_CPUS ? (void *)(i + 1) : NULL;
286 }
287 
288 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
289 {
290 	++*pos;
291 	return c_start(m, pos);
292 }
293 
294 static void c_stop(struct seq_file *m, void *v)
295 {
296 }
297 
298 struct seq_operations cpuinfo_op = {
299 	.start	= c_start,
300 	.next	= c_next,
301 	.stop	= c_stop,
302 	.show	= show_cpuinfo,
303 };
304 
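/*
 * Look for an initrd handed over by the boot loader or firmware.  Its
 * location is published as physical addresses in the /chosen node, along
 * these lines (illustrative values only):
 *
 *	chosen {
 *		linux,initrd-start = <0x01800000>;
 *		linux,initrd-end   = <0x01c00000>;
 *	};
 *
 * which is why the values are run through __va() before being stored in
 * the generic initrd_start/initrd_end variables.
 */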
305 void __init check_for_initrd(void)
306 {
307 #ifdef CONFIG_BLK_DEV_INITRD
308 	unsigned long *prop;
309 
310 	DBG(" -> check_for_initrd()\n");
311 
312 	if (of_chosen) {
313 		prop = (unsigned long *)get_property(of_chosen,
314 				"linux,initrd-start", NULL);
315 		if (prop != NULL) {
316 			initrd_start = (unsigned long)__va(*prop);
317 			prop = (unsigned long *)get_property(of_chosen,
318 					"linux,initrd-end", NULL);
319 			if (prop != NULL) {
320 				initrd_end = (unsigned long)__va(*prop);
321 				initrd_below_start_ok = 1;
322 			} else
323 				initrd_start = 0;
324 		}
325 	}
326 
327 	/* If we were passed an initrd, set the ROOT_DEV properly if the values
328 	 * look sensible. If not, clear initrd reference.
329 	 */
330 	if (is_kernel_addr(initrd_start) && is_kernel_addr(initrd_end) &&
331 	    initrd_end > initrd_start)
332 		ROOT_DEV = Root_RAM0;
333 	else
334 		initrd_start = initrd_end = 0;
335 
336 	if (initrd_start)
337 		printk(KERN_INFO "Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);
338 
339 	DBG(" <- check_for_initrd()\n");
340 #endif /* CONFIG_BLK_DEV_INITRD */
341 }
342 
343 #ifdef CONFIG_SMP
344 
345 /**
346  * smp_setup_cpu_maps - initialize the following cpu maps:
347  *                  cpu_possible_map
348  *                  cpu_present_map
349  *                  cpu_sibling_map
350  *
351  * Having the possible map set up early allows us to restrict allocations
352  * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
353  *
354  * We do not initialize the online map here; cpus set their own bits in
355  * cpu_online_map as they come up.
356  *
357  * This function is valid only for Open Firmware systems.  finish_device_tree
358  * must be called before using this.
359  *
360  * While we're here, we may as well set the "physical" cpu ids in the paca.
361  *
362  * NOTE: This must match the parsing done in early_init_dt_scan_cpus.
363  */
364 void __init smp_setup_cpu_maps(void)
365 {
366 	struct device_node *dn = NULL;
367 	int cpu = 0;
368 
369 	while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
370 		int *intserv;
371 		int j, len = sizeof(u32), nthreads = 1;
372 
373 		intserv = (int *)get_property(dn, "ibm,ppc-interrupt-server#s",
374 					      &len);
375 		if (intserv)
376 			nthreads = len / sizeof(int);
377 		else {
378 			intserv = (int *) get_property(dn, "reg", NULL);
379 			if (!intserv)
380 				intserv = &cpu;	/* assume logical == phys */
381 		}
382 
383 		for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
384 			cpu_set(cpu, cpu_present_map);
385 			set_hard_smp_processor_id(cpu, intserv[j]);
386 			cpu_set(cpu, cpu_possible_map);
387 			cpu++;
388 		}
389 	}
390 
391 #ifdef CONFIG_PPC64
392 	/*
393 	 * On pSeries LPAR, we need to know how many cpus
394 	 * could possibly be added to this partition.
395 	 */
396 	if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) &&
397 	    (dn = of_find_node_by_path("/rtas"))) {
398 		int num_addr_cell, num_size_cell, maxcpus;
399 		unsigned int *ireg;
400 
401 		num_addr_cell = prom_n_addr_cells(dn);
402 		num_size_cell = prom_n_size_cells(dn);
403 
404 		ireg = (unsigned int *)
405 			get_property(dn, "ibm,lrdr-capacity", NULL);
406 
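		/*
		 * ibm,lrdr-capacity holds the maximum memory the partition
		 * may grow to (encoded in #address-cells + #size-cells
		 * cells) followed by the maximum number of processors,
		 * which is the entry indexed below.
		 */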
407 		if (!ireg)
408 			goto out;
409 
410 		maxcpus = ireg[num_addr_cell + num_size_cell];
411 
412 		/* Double maxcpus for processors which have SMT capability */
413 		if (cpu_has_feature(CPU_FTR_SMT))
414 			maxcpus *= 2;
415 
416 		if (maxcpus > NR_CPUS) {
417 			printk(KERN_WARNING
418 			       "Partition configured for %d cpus, "
419 			       "operating system maximum is %d.\n",
420 			       maxcpus, NR_CPUS);
421 			maxcpus = NR_CPUS;
422 		} else
423 			printk(KERN_INFO "Partition configured for %d cpus.\n",
424 			       maxcpus);
425 
426 		for (cpu = 0; cpu < maxcpus; cpu++)
427 			cpu_set(cpu, cpu_possible_map);
428 	out:
429 		of_node_put(dn);
430 	}
431 
432 	/*
433 	 * Do the sibling map; assume only two threads per processor.
434 	 */
435 	for_each_possible_cpu(cpu) {
436 		cpu_set(cpu, cpu_sibling_map[cpu]);
437 		if (cpu_has_feature(CPU_FTR_SMT))
438 			cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
439 	}
440 
441 	vdso_data->processorCount = num_present_cpus();
442 #endif /* CONFIG_PPC64 */
443 }
444 #endif /* CONFIG_SMP */
445 
446 int __initdata do_early_xmon;
447 #ifdef CONFIG_XMON
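/*
 * Parse the "xmon" command line option.  "xmon=on" and "xmon=off" simply
 * enable or disable the debugger; "xmon=early" (or a bare "xmon") also
 * sets do_early_xmon so the setup code can drop into xmon before the
 * machine is fully up.
 */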
448 static int __init early_xmon(char *p)
449 {
450 	/* ensure xmon is enabled */
451 	if (p) {
452 		if (strncmp(p, "on", 2) == 0)
453 			xmon_init(1);
454 		if (strncmp(p, "off", 3) == 0)
455 			xmon_init(0);
456 		if (strncmp(p, "early", 5) != 0)
457 			return 0;
458 	}
459 	xmon_init(1);
460 	do_early_xmon = 1;
461 
462 	return 0;
463 }
464 early_param("xmon", early_xmon);
465 #endif
466 
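/*
 * Register a "pcspkr" platform device when the device tree advertises a
 * PC-style speaker (a node compatible with "pnpPNP,100") so the generic
 * pcspkr input driver can bind to it.
 */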
467 static __init int add_pcspkr(void)
468 {
469 	struct device_node *np;
470 	struct platform_device *pd;
471 	int ret;
472 
473 	np = of_find_compatible_node(NULL, NULL, "pnpPNP,100");
474 	if (!np)
475 		return -ENODEV;
476 	of_node_put(np);	/* we only needed to know the node exists */
477 
478 	pd = platform_device_alloc("pcspkr", -1);
479 	if (!pd)
480 		return -ENOMEM;
481 
482 	ret = platform_device_add(pd);
483 	if (ret)
484 		platform_device_put(pd);
485 
486 	return ret;
487 }
488 device_initcall(add_pcspkr);
489 
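/*
 * Walk the machine descriptions the platform files placed in the
 * __machine_desc section, copy each candidate into ppc_md and call its
 * probe() hook until one claims the current machine.  There is no
 * fallback: if nothing matches we simply spin.
 */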
490 void probe_machine(void)
491 {
492 	extern struct machdep_calls __machine_desc_start;
493 	extern struct machdep_calls __machine_desc_end;
494 
495 	/*
496 	 * Iterate all ppc_md structures until we find the proper
497 	 * one for the current machine type
498 	 */
499 	DBG("Probing machine type ...\n");
500 
501 	for (machine_id = &__machine_desc_start;
502 	     machine_id < &__machine_desc_end;
503 	     machine_id++) {
504 		DBG("  %s ...", machine_id->name);
505 		memcpy(&ppc_md, machine_id, sizeof(struct machdep_calls));
506 		if (ppc_md.probe()) {
507 			DBG(" match!\n");
508 			break;
509 		}
510 		DBG("\n");
511 	}
512 	/* What can we do if we didn't find one? */
513 	if (machine_id >= &__machine_desc_end) {
514 		DBG("No suitable machine found!\n");
515 		for (;;);
516 	}
517 
518 	printk(KERN_INFO "Using %s machine description\n", ppc_md.name);
519 }
520 
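/*
 * Ask the platform whether a legacy ISA-style I/O port (keyboard
 * controller, RTC, parallel port, ...) is really present.  Platforms
 * that do not care leave the hook unset, in which case the port is
 * assumed to be usable and 0 is returned.
 */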
521 int check_legacy_ioport(unsigned long base_port)
522 {
523 	if (ppc_md.check_legacy_ioport == NULL)
524 		return 0;
525 	return ppc_md.check_legacy_ioport(base_port);
526 }
527 EXPORT_SYMBOL(check_legacy_ioport);
528 
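/*
 * Panic notifier: forward panic() into the platform's ppc_md.panic hook
 * (on pSeries, for example, this lets firmware record the event).  It is
 * registered with the lowest possible priority so that every other
 * notifier runs first, since the hook may never return.
 */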
529 static int ppc_panic_event(struct notifier_block *this,
530                              unsigned long event, void *ptr)
531 {
532 	ppc_md.panic(ptr);  /* May not return */
533 	return NOTIFY_DONE;
534 }
535 
536 static struct notifier_block ppc_panic_block = {
537 	.notifier_call = ppc_panic_event,
538 	.priority = INT_MIN /* may not return; must be done last */
539 };
540 
541 void __init setup_panic(void)
542 {
543 	atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
544 }
545