xref: /linux/arch/alpha/kernel/setup.c (revision 621cde16e49b3ecf7d59a8106a20aaebfb4a59a9)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/arch/alpha/kernel/setup.c
4  *
5  *  Copyright (C) 1995  Linus Torvalds
6  */
7 
8 /* 2.3.x bootmem, 1999 Andrea Arcangeli <andrea@suse.de> */
9 
10 /*
11  * Bootup setup stuff.
12  */
13 
14 #include <linux/sched.h>
15 #include <linux/kernel.h>
16 #include <linux/mm.h>
17 #include <linux/stddef.h>
18 #include <linux/unistd.h>
19 #include <linux/ptrace.h>
20 #include <linux/slab.h>
21 #include <linux/user.h>
22 #include <linux/screen_info.h>
23 #include <linux/delay.h>
24 #include <linux/mc146818rtc.h>
25 #include <linux/console.h>
26 #include <linux/cpu.h>
27 #include <linux/errno.h>
28 #include <linux/init.h>
29 #include <linux/string.h>
30 #include <linux/ioport.h>
31 #include <linux/panic_notifier.h>
32 #include <linux/platform_device.h>
33 #include <linux/memblock.h>
34 #include <linux/pci.h>
35 #include <linux/seq_file.h>
36 #include <linux/root_dev.h>
37 #include <linux/initrd.h>
38 #include <linux/eisa.h>
39 #include <linux/pfn.h>
40 #ifdef CONFIG_MAGIC_SYSRQ
41 #include <linux/sysrq.h>
42 #include <linux/reboot.h>
43 #endif
44 #include <linux/notifier.h>
45 #include <asm/setup.h>
46 #include <asm/io.h>
47 #include <linux/log2.h>
48 #include <linux/export.h>
49 
50 static int alpha_panic_event(struct notifier_block *, unsigned long, void *);
51 static struct notifier_block alpha_panic_block = {
52 	.notifier_call = alpha_panic_event,
53 	.next = NULL,
54 	.priority = INT_MAX, /* try to do it first */
55 };
56 
57 #include <linux/uaccess.h>
58 #include <asm/hwrpb.h>
59 #include <asm/dma.h>
60 #include <asm/mmu_context.h>
61 #include <asm/console.h>
62 
63 #include "proto.h"
64 #include "pci_impl.h"
65 
66 
67 struct hwrpb_struct *hwrpb;
68 EXPORT_SYMBOL(hwrpb);
69 unsigned long srm_hae;
70 
71 int alpha_l1i_cacheshape;
72 int alpha_l1d_cacheshape;
73 int alpha_l2_cacheshape;
74 int alpha_l3_cacheshape;
75 
76 #ifdef CONFIG_VERBOSE_MCHECK
77 /* 0=minimum, 1=verbose, 2=all */
78 /* These can be overridden via the command line, e.g. "verbose_mcheck=2". */
79 unsigned long alpha_verbose_mcheck = CONFIG_VERBOSE_MCHECK_ON;
80 #endif
81 
82 /* Which processor we booted from.  */
83 int boot_cpuid;
84 
85 /*
86  * Using SRM callbacks for initial console output. This works from
87  * setup_arch() time through the end of time_init(), as those places
88  * are under our (Alpha) control.
89  *
90  * "srmcons" specified in the boot command arguments allows us to
91  * see kernel messages during the period of time before the true
92  * console device is "registered" during console_init().
93  * As of this version (2.5.59), console_init() will call
94  * disable_early_printk() as the last action before initializing
95  * the console drivers. That's the last possible time srmcons can be
96  * unregistered without interfering with console behavior.
97  *
98  * By default, OFF; set it with a boot command arg of "srmcons" or
99  * "console=srm". The meaning of these two args is:
100  *     "srmcons"     - early callback prints
101  *     "console=srm" - full callback based console, including early prints
102  */
103 int srmcons_output = 0;
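/*
 * Bit layout, as set by the boot-argument parsing in setup_arch()
 * below:
 *	bit 0 - "srmcons"     (early SRM callback prints only)
 *	bit 1 - "console=srm" (full SRM callback console)
 */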
104 
105 /* Enforce a memory size limit; useful for testing. By default, none. */
106 unsigned long mem_size_limit = 0;
107 
108 /* Set AGP GART window size (0 means disabled). */
109 unsigned long alpha_agpgart_size = DEFAULT_AGP_APER_SIZE;
110 
111 #ifdef CONFIG_ALPHA_GENERIC
112 struct alpha_machine_vector alpha_mv;
113 EXPORT_SYMBOL(alpha_mv);
114 #endif
115 
116 #ifndef alpha_using_srm
117 int alpha_using_srm;
118 EXPORT_SYMBOL(alpha_using_srm);
119 #endif
120 
121 #ifndef alpha_using_qemu
122 int alpha_using_qemu;
123 #endif
124 
125 static struct alpha_machine_vector *get_sysvec(unsigned long, unsigned long,
126 					       unsigned long);
127 static struct alpha_machine_vector *get_sysvec_byname(const char *);
128 static void get_sysnames(unsigned long, unsigned long, unsigned long,
129 			 char **, char **);
130 static void determine_cpu_caches (unsigned int);
131 
132 static char __initdata command_line[COMMAND_LINE_SIZE];
133 
134 #ifdef CONFIG_VGA_CONSOLE
135 /*
136  * The format of "screen_info" is strange, and due to early
137  * i386-setup code. This is just enough to make the console
138  * code think we're on a VGA color display.
139  */
140 
141 struct screen_info vgacon_screen_info = {
142 	.orig_x = 0,
143 	.orig_y = 25,
144 	.orig_video_cols = 80,
145 	.orig_video_lines = 25,
146 	.orig_video_isVGA = 1,
147 	.orig_video_points = 16
148 };
149 #endif
150 
151 /*
152  * The direct map I/O window, if any.  This should be the same
153  * for all busses, since it's used by virt_to_bus.
154  */
155 
156 unsigned long __direct_map_base;
157 unsigned long __direct_map_size;
158 EXPORT_SYMBOL(__direct_map_base);
159 EXPORT_SYMBOL(__direct_map_size);
160 
161 /*
162  * Declare all of the machine vectors.
163  */
164 
165 /* GCC 2.7.2 (on alpha at least) is lame.  It does not support either
166    __attribute__((weak)) or #pragma weak.  Bypass it and talk directly
167    to the assembler.  */
168 
169 #define WEAK(X) \
170 	extern struct alpha_machine_vector X; \
171 	asm(".weak "#X)
172 
173 WEAK(alcor_mv);
174 WEAK(clipper_mv);
175 WEAK(dp264_mv);
176 WEAK(eb164_mv);
177 WEAK(eiger_mv);
178 WEAK(lx164_mv);
179 WEAK(marvel_ev7_mv);
180 WEAK(miata_mv);
181 WEAK(mikasa_primo_mv);
182 WEAK(monet_mv);
183 WEAK(nautilus_mv);
184 WEAK(noritake_primo_mv);
185 WEAK(pc164_mv);
186 WEAK(privateer_mv);
187 WEAK(rawhide_mv);
188 WEAK(ruffian_mv);
189 WEAK(rx164_mv);
190 WEAK(sable_gamma_mv);
191 WEAK(shark_mv);
192 WEAK(sx164_mv);
193 WEAK(takara_mv);
194 WEAK(titan_mv);
195 WEAK(webbrick_mv);
196 WEAK(wildfire_mv);
197 WEAK(xlt_mv);
198 
199 #undef WEAK
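/*
 * For illustration, each WEAK() use above expands to an extern
 * declaration plus an assembler .weak directive, e.g.
 *
 *	WEAK(alcor_mv);
 * becomes
 *	extern struct alpha_machine_vector alcor_mv;
 *	asm(".weak alcor_mv");
 *
 * A vector that is not built into this kernel therefore resolves to
 * address 0, and the lookup tables below simply yield NULL for it.
 */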
200 
201 /*
202  * I/O resources inherited from PeeCees.  Except for perhaps the
203  * turbochannel alphas, everyone has these on some sort of SuperIO chip.
204  *
205  * ??? If this becomes less standard, move the struct out into the
206  * machine vector.
207  */
208 
209 static void __init
210 reserve_std_resources(void)
211 {
212 	static struct resource standard_io_resources[] = {
213 		{ .name = "rtc", .start = 0x70, .end = 0x7f },
214 		{ .name = "dma1", .start = 0x00, .end = 0x1f },
215 		{ .name = "pic1", .start = 0x20, .end = 0x3f },
216 		{ .name = "timer", .start = 0x40, .end = 0x5f },
217 		{ .name = "keyboard", .start = 0x60, .end = 0x6f },
218 		{ .name = "dma page reg", .start = 0x80, .end = 0x8f },
219 		{ .name = "pic2", .start = 0xa0, .end = 0xbf },
220 		{ .name = "dma2", .start = 0xc0, .end = 0xdf },
221 	};
222 
223 	struct resource *io = &ioport_resource;
224 	size_t i;
225 
226 	if (hose_head) {
227 		struct pci_controller *hose;
228 		for (hose = hose_head; hose; hose = hose->next)
229 			if (hose->index == 0) {
230 				io = hose->io_space;
231 				break;
232 			}
233 	}
234 
235 	for (i = 0; i < ARRAY_SIZE(standard_io_resources); ++i)
236 		request_resource(io, standard_io_resources+i);
237 }
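/*
 * Illustrative only: on a typical system with hose 0 providing the
 * legacy I/O space, the requests above end up as /proc/ioports
 * entries along the lines of
 *
 *	0000-001f : dma1
 *	0020-003f : pic1
 *	0040-005f : timer
 *	0060-006f : keyboard
 *	0070-007f : rtc
 *	0080-008f : dma page reg
 *	00a0-00bf : pic2
 *	00c0-00df : dma2
 */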
238 
239 #define PFN_MAX		PFN_DOWN(0x80000000)
240 #define for_each_mem_cluster(memdesc, _cluster, i)		\
241 	for ((_cluster) = (memdesc)->cluster, (i) = 0;		\
242 	     (i) < (memdesc)->numclusters; (i)++, (_cluster)++)
243 
244 static unsigned long __init
245 get_mem_size_limit(char *s)
246 {
247 	unsigned long end = 0;
248 	char *from = s;
249 
250 	end = simple_strtoul(from, &from, 0);
251 	if (*from == 'K' || *from == 'k') {
252 		end = end << 10;
253 		from++;
254 	} else if (*from == 'M' || *from == 'm') {
255 		end = end << 20;
256 		from++;
257 	} else if (*from == 'G' || *from == 'g') {
258 		end = end << 30;
259 		from++;
260 	}
261 	return end >> PAGE_SHIFT; /* Return the PFN of the limit. */
262 }
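/*
 * Worked example: with the 8KB pages used on Alpha (PAGE_SHIFT == 13),
 * "mem=64M" parses to end = 64 << 20 = 0x4000000, and the function
 * returns 0x4000000 >> 13 = 8192, i.e. the limit expressed in page
 * frames.
 */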
263 
264 #ifdef CONFIG_BLK_DEV_INITRD
265 void * __init
266 move_initrd(unsigned long mem_limit)
267 {
268 	void *start;
269 	unsigned long size;
270 
271 	size = initrd_end - initrd_start;
272 	start = memblock_alloc(PAGE_ALIGN(size), PAGE_SIZE);
273 	if (!start || __pa(start) + size > mem_limit) {
274 		initrd_start = initrd_end = 0;
275 		return NULL;
276 	}
277 	memmove(start, (void *)initrd_start, size);
278 	initrd_start = (unsigned long)start;
279 	initrd_end = initrd_start + size;
280 	printk("initrd moved to %p\n", start);
281 	return start;
282 }
283 #endif
284 
285 static void __init
286 setup_memory(void *kernel_end)
287 {
288 	struct memclust_struct * cluster;
289 	struct memdesc_struct * memdesc;
290 	unsigned long kernel_size;
291 	unsigned long i;
292 
293 	/* Find free clusters, and init and free the bootmem accordingly.  */
294 	memdesc = (struct memdesc_struct *)
295 	  (hwrpb->mddt_offset + (unsigned long) hwrpb);
296 
297 	for_each_mem_cluster(memdesc, cluster, i) {
298 		unsigned long end;
299 
300 		printk("memcluster %lu, usage %01lx, start %8lu, end %8lu\n",
301 		       i, cluster->usage, cluster->start_pfn,
302 		       cluster->start_pfn + cluster->numpages);
303 
304 		end = cluster->start_pfn + cluster->numpages;
305 		if (end > max_low_pfn)
306 			max_low_pfn = end;
307 
308 		memblock_add(PFN_PHYS(cluster->start_pfn),
309 			     cluster->numpages << PAGE_SHIFT);
310 
311 		/* Bit 0 is console/PALcode reserved.  Bit 1 is
312 		   non-volatile memory -- we might want to mark
313 		   this for later.  */
314 		if (cluster->usage & 3)
315 			memblock_reserve(PFN_PHYS(cluster->start_pfn),
316 				         cluster->numpages << PAGE_SHIFT);
317 	}
318 
319 	/*
320 	 * Except for the NUMA systems (wildfire, marvel) all of the
321 	 * Alpha systems we run on support 32GB of memory or less.
322 	 * Since the NUMA systems introduce large holes in memory addressing,
323 	 * we can get into a situation where there is not enough contiguous
324 	 * memory for the memory map.
325 	 *
326 	 * Limit memory to the first 32GB to limit the NUMA systems to
327 	 * memory on their first node (wildfire) or 2 (marvel) to avoid
328 	 * not being able to produce the memory map. In order to access
329 	 * all of the memory on the NUMA systems, build with discontiguous
330 	 * memory support.
331 	 *
332 	 * If the user specified a memory limit, let that memory limit stand.
333 	 */
334 	if (!mem_size_limit)
335 		mem_size_limit = (32ul * 1024 * 1024 * 1024) >> PAGE_SHIFT;
336 
337 	if (mem_size_limit && max_low_pfn >= mem_size_limit)
338 	{
339 		printk("setup: forcing memory size to %ldK (from %ldK).\n",
340 		       mem_size_limit << (PAGE_SHIFT - 10),
341 		       max_low_pfn    << (PAGE_SHIFT - 10));
342 		max_low_pfn = mem_size_limit;
343 	}
344 
345 	/* Reserve the kernel memory. */
346 	kernel_size = virt_to_phys(kernel_end) - KERNEL_START_PHYS;
347 	memblock_reserve(KERNEL_START_PHYS, kernel_size);
348 
349 #ifdef CONFIG_BLK_DEV_INITRD
350 	initrd_start = INITRD_START;
351 	if (initrd_start) {
352 		initrd_end = initrd_start+INITRD_SIZE;
353 		printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
354 		       (void *) initrd_start, INITRD_SIZE);
355 
356 		if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) {
357 			if (!move_initrd(PFN_PHYS(max_low_pfn)))
358 				printk("initrd extends beyond end of memory "
359 				       "(0x%08lx > 0x%p)\ndisabling initrd\n",
360 				       initrd_end,
361 				       phys_to_virt(PFN_PHYS(max_low_pfn)));
362 		} else {
363 			memblock_reserve(virt_to_phys((void *)initrd_start),
364 					INITRD_SIZE);
365 		}
366 	}
367 #endif /* CONFIG_BLK_DEV_INITRD */
368 }
369 
370 int page_is_ram(unsigned long pfn)
371 {
372 	struct memclust_struct * cluster;
373 	struct memdesc_struct * memdesc;
374 	unsigned long i;
375 
376 	memdesc = (struct memdesc_struct *)
377 		(hwrpb->mddt_offset + (unsigned long) hwrpb);
378 	for_each_mem_cluster(memdesc, cluster, i)
379 	{
380 		if (pfn >= cluster->start_pfn  &&
381 		    pfn < cluster->start_pfn + cluster->numpages) {
382 			return (cluster->usage & 3) ? 0 : 1;
383 		}
384 	}
385 
386 	return 0;
387 }
388 
389 static int __init
390 register_cpus(void)
391 {
392 	int i;
393 
394 	for_each_possible_cpu(i) {
395 		struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
396 		if (!p)
397 			return -ENOMEM;
398 		register_cpu(p, i);
399 	}
400 	return 0;
401 }
402 
403 arch_initcall(register_cpus);
404 
405 #ifdef CONFIG_MAGIC_SYSRQ
406 static void sysrq_reboot_handler(u8 unused)
407 {
408 	machine_halt();
409 }
410 
411 static const struct sysrq_key_op srm_sysrq_reboot_op = {
412 	.handler	= sysrq_reboot_handler,
413 	.help_msg       = "reboot(b)",
414 	.action_msg     = "Resetting",
415 	.enable_mask    = SYSRQ_ENABLE_BOOT,
416 };
417 #endif
418 
419 void __init
420 setup_arch(char **cmdline_p)
421 {
422 	extern char _end[];
423 
424 	struct alpha_machine_vector *vec = NULL;
425 	struct percpu_struct *cpu;
426 	char *type_name, *var_name, *p;
427 	void *kernel_end = _end; /* end of kernel */
428 	char *args = command_line;
429 
430 	hwrpb = (struct hwrpb_struct*) __va(INIT_HWRPB->phys_addr);
431 	boot_cpuid = hard_smp_processor_id();
432 
433 	/*
434 	 * Pre-process the system type to make sure it will be valid.
435 	 *
436 	 * This may restore real CABRIO and EB66+ family names, i.e.
437 	 * EB64+ and EB66.
438 	 *
439 	 * Oh, and "white box" AS800 (aka DIGITAL Server 3000 series)
440 	 * and AS1200 (DIGITAL Server 5000 series) have the type as
441 	 * the negative of the real one.
442 	 */
443 	if ((long)hwrpb->sys_type < 0) {
444 		hwrpb->sys_type = -((long)hwrpb->sys_type);
445 		hwrpb_update_checksum(hwrpb);
446 	}
447 
448 	/* Register a call for panic conditions. */
449 	atomic_notifier_chain_register(&panic_notifier_list,
450 			&alpha_panic_block);
451 
452 #ifndef alpha_using_srm
453 	/* Assume that we've booted from SRM if we haven't booted from MILO.
454 	   Detect the latter by looking for "MILO" in the system serial number.  */
455 	alpha_using_srm = !str_has_prefix((const char *)hwrpb->ssn, "MILO");
456 #endif
457 #ifndef alpha_using_qemu
458 	/* Similarly, look for QEMU.  */
459 	alpha_using_qemu = strstr((const char *)hwrpb->ssn, "QEMU") != 0;
460 #endif
461 
462 	/* If we are using SRM, we want to allow callbacks
463 	   as early as possible, so do this NOW, and then
464 	   they should work immediately thereafter.
465 	*/
466 	kernel_end = callback_init(kernel_end);
467 
468 	/*
469 	 * Locate the command line.
470 	 */
471 	strscpy(command_line, COMMAND_LINE, sizeof(command_line));
472 	strcpy(boot_command_line, command_line);
473 	*cmdline_p = command_line;
474 
475 	/*
476 	 * Process command-line arguments.
477 	 */
478 	while ((p = strsep(&args, " \t")) != NULL) {
479 		if (!*p) continue;
480 		if (strncmp(p, "alpha_mv=", 9) == 0) {
481 			vec = get_sysvec_byname(p+9);
482 			continue;
483 		}
484 		if (strncmp(p, "cycle=", 6) == 0) {
485 			est_cycle_freq = simple_strtol(p+6, NULL, 0);
486 			continue;
487 		}
488 		if (strncmp(p, "mem=", 4) == 0) {
489 			mem_size_limit = get_mem_size_limit(p+4);
490 			continue;
491 		}
492 		if (strncmp(p, "srmcons", 7) == 0) {
493 			srmcons_output |= 1;
494 			continue;
495 		}
496 		if (strncmp(p, "console=srm", 11) == 0) {
497 			srmcons_output |= 2;
498 			continue;
499 		}
500 		if (strncmp(p, "gartsize=", 9) == 0) {
501 			alpha_agpgart_size =
502 				get_mem_size_limit(p+9) << PAGE_SHIFT;
503 			continue;
504 		}
505 #ifdef CONFIG_VERBOSE_MCHECK
506 		if (strncmp(p, "verbose_mcheck=", 15) == 0) {
507 			alpha_verbose_mcheck = simple_strtol(p+15, NULL, 0);
508 			continue;
509 		}
510 #endif
511 	}
512 
513 	/* Replace the command line, now that we've killed it with strsep.  */
514 	strcpy(command_line, boot_command_line);
515 
516 	/* If we want SRM console printk echoing early, do it now. */
517 	if (alpha_using_srm && srmcons_output) {
518 		register_srm_console();
519 
520 		/*
521 		 * If "console=srm" was specified, clear the srmcons_output
522 		 * flag now so that time.c won't unregister_srm_console
523 		 */
524 		if (srmcons_output & 2)
525 			srmcons_output = 0;
526 	}
527 
528 #ifdef CONFIG_MAGIC_SYSRQ
529 	/* If we're using SRM, make sysrq-b halt back to the prom,
530 	   not auto-reboot.  */
531 	if (alpha_using_srm) {
532 		unregister_sysrq_key('b', __sysrq_reboot_op);
533 		register_sysrq_key('b', &srm_sysrq_reboot_op);
534 	}
535 #endif
536 
537 	/*
538 	 * Identify and reconfigure for the current system.
539 	 */
540 	cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);
541 
542 	get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
543 		     cpu->type, &type_name, &var_name);
544 	if (*var_name == '0')
545 		var_name = "";
546 
547 	if (!vec) {
548 		vec = get_sysvec(hwrpb->sys_type, hwrpb->sys_variation,
549 				 cpu->type);
550 	}
551 
552 	if (!vec) {
553 		panic("Unsupported system type: %s%s%s (%ld %ld)\n",
554 		      type_name, (*var_name ? " variation " : ""), var_name,
555 		      hwrpb->sys_type, hwrpb->sys_variation);
556 	}
557 	if (vec != &alpha_mv) {
558 		alpha_mv = *vec;
559 	}
560 
561 	printk("Booting "
562 #ifdef CONFIG_ALPHA_GENERIC
563 	       "GENERIC "
564 #endif
565 	       "on %s%s%s using machine vector %s from %s\n",
566 	       type_name, (*var_name ? " variation " : ""),
567 	       var_name, alpha_mv.vector_name,
568 	       (alpha_using_srm ? "SRM" : "MILO"));
569 
570 	printk("Major Options: "
571 #ifdef CONFIG_SMP
572 	       "SMP "
573 #endif
574 #ifdef CONFIG_ALPHA_EV56
575 	       "EV56 "
576 #endif
577 #ifdef CONFIG_ALPHA_EV67
578 	       "EV67 "
579 #endif
580 #ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
581 	       "LEGACY_START "
582 #endif
583 #ifdef CONFIG_VERBOSE_MCHECK
584 	       "VERBOSE_MCHECK "
585 #endif
586 
587 #ifdef CONFIG_DEBUG_SPINLOCK
588 	       "DEBUG_SPINLOCK "
589 #endif
590 #ifdef CONFIG_MAGIC_SYSRQ
591 	       "MAGIC_SYSRQ "
592 #endif
593 	       "\n");
594 
595 	printk("Command line: %s\n", command_line);
596 
597 	/*
598 	 * Sync up the HAE.
599 	 * Save the SRM's current value for restoration.
600 	 */
601 	srm_hae = *alpha_mv.hae_register;
602 	__set_hae(alpha_mv.hae_cache);
603 
604 	/* Reset enable correctable error reports.  */
605 	wrmces(0x7);
606 
607 	/* Find our memory.  */
608 	setup_memory(kernel_end);
609 	memblock_set_bottom_up(true);
610 	sparse_init();
611 
612 	/* First guess at cpu cache sizes.  Do this before init_arch.  */
613 	determine_cpu_caches(cpu->type);
614 
615 	/* Initialize the machine.  Usually has to do with setting up
616 	   DMA windows and the like.  */
617 	if (alpha_mv.init_arch)
618 		alpha_mv.init_arch();
619 
620 	/* Reserve standard resources.  */
621 	reserve_std_resources();
622 
623 	/*
624 	 * Give us a default console.  TGA users will see nothing until
625 	 * chr_dev_init is called, rather late in the boot sequence.
626 	 */
627 
628 #ifdef CONFIG_VT
629 #if defined(CONFIG_VGA_CONSOLE)
630 	vgacon_register_screen(&vgacon_screen_info);
631 #endif
632 #endif
633 
634 	/* Default root filesystem to sda2.  */
635 	ROOT_DEV = MKDEV(SCSI_DISK0_MAJOR, 2);
636 
637 #ifdef CONFIG_EISA
638 	/* FIXME:  only set this when we actually have EISA in this box? */
639 	EISA_bus = 1;
640 #endif
641 
642  	/*
643 	 * Check ASN in HWRPB for validity, report if bad.
644 	 * FIXME: how was this failing?  Should we trust it instead,
645 	 * and copy the value into alpha_mv.max_asn?
646  	 */
647 
648  	if (hwrpb->max_asn != MAX_ASN) {
649 		printk("Max ASN from HWRPB is bad (0x%lx)\n", hwrpb->max_asn);
650  	}
651 
652 	/*
653 	 * Identify the flock of penguins.
654 	 */
655 
656 #ifdef CONFIG_SMP
657 	setup_smp();
658 #endif
659 	paging_init();
660 }
661 
662 static char sys_unknown[] = "Unknown";
663 static char systype_names[][16] = {
664 	"0",
665 	"ADU", "Cobra", "Ruby", "Flamingo", "Mannequin", "Jensen",
666 	"Pelican", "Morgan", "Sable", "Medulla", "Noname",
667 	"Turbolaser", "Avanti", "Mustang", "Alcor", "Tradewind",
668 	"Mikasa", "EB64", "EB66", "EB64+", "AlphaBook1",
669 	"Rawhide", "K2", "Lynx", "XL", "EB164", "Noritake",
670 	"Cortex", "29", "Miata", "XXM", "Takara", "Yukon",
671 	"Tsunami", "Wildfire", "CUSCO", "Eiger", "Titan", "Marvel"
672 };
673 
674 static char unofficial_names[][8] = {"100", "Ruffian"};
675 
676 static char api_names[][16] = {"200", "Nautilus"};
677 
678 static char eb164_names[][8] = {"EB164", "PC164", "LX164", "SX164", "RX164"};
679 static int eb164_indices[] = {0,0,0,1,1,1,1,1,2,2,2,2,3,3,3,3,4};
680 
681 static char alcor_names[][16] = {"Alcor", "Maverick", "Bret"};
682 static int alcor_indices[] = {0,0,0,1,1,1,0,0,0,0,0,0,2,2,2,2,2,2};
683 
684 static char marvel_names[][16] = {
685 	"Marvel/EV7"
686 };
687 static int marvel_indices[] = { 0 };
688 
689 static char rawhide_names[][16] = {
690 	"Dodge", "Wrangler", "Durango", "Tincup", "DaVinci"
691 };
692 static int rawhide_indices[] = {0,0,0,1,1,2,2,3,3,4,4};
693 
694 static char titan_names[][16] = {
695 	"DEFAULT", "Privateer", "Falcon", "Granite"
696 };
697 static int titan_indices[] = {0,1,2,2,3};
698 
699 static char tsunami_names[][16] = {
700 	"0", "DP264", "Warhol", "Windjammer", "Monet", "Clipper",
701 	"Goldrush", "Webbrick", "Catamaran", "Brisbane", "Melbourne",
702 	"Flying Clipper", "Shark"
703 };
704 static int tsunami_indices[] = {0,1,2,3,4,5,6,7,8,9,10,11,12};
705 
706 static struct alpha_machine_vector * __init
707 get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu)
708 {
709 	static struct alpha_machine_vector *systype_vecs[] __initdata =
710 	{
711 		NULL,		/* 0 */
712 		NULL,		/* ADU */
713 		NULL,		/* Cobra */
714 		NULL,		/* Ruby */
715 		NULL,		/* Flamingo */
716 		NULL,		/* Mannequin */
717 		NULL,		/* Jensens */
718 		NULL, 		/* Pelican */
719 		NULL,		/* Morgan */
720 		NULL,		/* Sable -- see below.  */
721 		NULL,		/* Medulla */
722 		NULL,		/* Noname */
723 		NULL,		/* Turbolaser */
724 		NULL,		/* Avanti */
725 		NULL,		/* Mustang */
726 		NULL,		/* Alcor, Bret, Maverick. HWRPB inaccurate? */
727 		NULL,		/* Tradewind */
728 		NULL,		/* Mikasa -- see below.  */
729 		NULL,		/* EB64 */
730 		NULL,		/* EB66 */
731 		NULL,		/* EB64+ */
732 		NULL,		/* Alphabook1 */
733 		&rawhide_mv,
734 		NULL,		/* K2 */
735 		NULL,		/* Lynx */
736 		NULL,		/* XL */
737 		NULL,		/* EB164 -- see variation.  */
738 		NULL,		/* Noritake -- see below.  */
739 		NULL,		/* Cortex */
740 		NULL,		/* 29 */
741 		&miata_mv,
742 		NULL,		/* XXM */
743 		&takara_mv,
744 		NULL,		/* Yukon */
745 		NULL,		/* Tsunami -- see variation.  */
746 		&wildfire_mv,	/* Wildfire */
747 		NULL,		/* CUSCO */
748 		&eiger_mv,	/* Eiger */
749 		NULL,		/* Titan */
750 		NULL,		/* Marvel */
751 	};
752 
753 	static struct alpha_machine_vector *unofficial_vecs[] __initdata =
754 	{
755 		NULL,		/* 100 */
756 		&ruffian_mv,
757 	};
758 
759 	static struct alpha_machine_vector *api_vecs[] __initdata =
760 	{
761 		NULL,		/* 200 */
762 		&nautilus_mv,
763 	};
764 
765 	static struct alpha_machine_vector *alcor_vecs[] __initdata =
766 	{
767 		&alcor_mv, &xlt_mv, &xlt_mv
768 	};
769 
770 	static struct alpha_machine_vector *eb164_vecs[] __initdata =
771 	{
772 		&eb164_mv, &pc164_mv, &lx164_mv, &sx164_mv, &rx164_mv
773 	};
774 
775 	static struct alpha_machine_vector *marvel_vecs[] __initdata =
776 	{
777 		&marvel_ev7_mv,
778 	};
779 
780 	static struct alpha_machine_vector *titan_vecs[] __initdata =
781 	{
782 		&titan_mv,		/* default   */
783 		&privateer_mv,		/* privateer */
784 		&titan_mv,		/* falcon    */
785 		&privateer_mv,		/* granite   */
786 	};
787 
788 	static struct alpha_machine_vector *tsunami_vecs[]  __initdata =
789 	{
790 		NULL,
791 		&dp264_mv,		/* dp264 */
792 		&dp264_mv,		/* warhol */
793 		&dp264_mv,		/* windjammer */
794 		&monet_mv,		/* monet */
795 		&clipper_mv,		/* clipper */
796 		&dp264_mv,		/* goldrush */
797 		&webbrick_mv,		/* webbrick */
798 		&dp264_mv,		/* catamaran */
799 		NULL,			/* brisbane? */
800 		NULL,			/* melbourne? */
801 		NULL,			/* flying clipper? */
802 		&shark_mv,		/* shark */
803 	};
804 
805 	/* ??? Do we need to distinguish between Rawhides?  */
806 
807 	struct alpha_machine_vector *vec;
808 
809 	/* Search the system tables first... */
810 	vec = NULL;
811 	if (type < ARRAY_SIZE(systype_vecs)) {
812 		vec = systype_vecs[type];
813 	} else if ((type > ST_API_BIAS) &&
814 		   (type - ST_API_BIAS) < ARRAY_SIZE(api_vecs)) {
815 		vec = api_vecs[type - ST_API_BIAS];
816 	} else if ((type > ST_UNOFFICIAL_BIAS) &&
817 		   (type - ST_UNOFFICIAL_BIAS) < ARRAY_SIZE(unofficial_vecs)) {
818 		vec = unofficial_vecs[type - ST_UNOFFICIAL_BIAS];
819 	}
820 
821 	/* If we've not found one, try for a variation.  */
822 
823 	if (!vec) {
824 		/* Member ID is a bit-field. */
825 		unsigned long member = (variation >> 10) & 0x3f;
826 
827 		cpu &= 0xffffffff; /* make it usable */
828 
829 		switch (type) {
830 		case ST_DEC_ALCOR:
831 			if (member < ARRAY_SIZE(alcor_indices))
832 				vec = alcor_vecs[alcor_indices[member]];
833 			break;
834 		case ST_DEC_EB164:
835 			if (member < ARRAY_SIZE(eb164_indices))
836 				vec = eb164_vecs[eb164_indices[member]];
837 			/* PC164 may show as EB164 variation with EV56 CPU,
838 			   but, since no true EB164 had anything but EV5... */
839 			if (vec == &eb164_mv && cpu == EV56_CPU)
840 				vec = &pc164_mv;
841 			break;
842 		case ST_DEC_MARVEL:
843 			if (member < ARRAY_SIZE(marvel_indices))
844 				vec = marvel_vecs[marvel_indices[member]];
845 			break;
846 		case ST_DEC_TITAN:
847 			vec = titan_vecs[0];	/* default */
848 			if (member < ARRAY_SIZE(titan_indices))
849 				vec = titan_vecs[titan_indices[member]];
850 			break;
851 		case ST_DEC_TSUNAMI:
852 			if (member < ARRAY_SIZE(tsunami_indices))
853 				vec = tsunami_vecs[tsunami_indices[member]];
854 			break;
855 		case ST_DEC_1000:
856 			vec = &mikasa_primo_mv;
857 			break;
858 		case ST_DEC_NORITAKE:
859 			vec = &noritake_primo_mv;
860 			break;
861 		case ST_DEC_2100_A500:
862 			vec = &sable_gamma_mv;
863 			break;
864 		}
865 	}
866 	return vec;
867 }
868 
869 static struct alpha_machine_vector * __init
870 get_sysvec_byname(const char *name)
871 {
872 	static struct alpha_machine_vector *all_vecs[] __initdata =
873 	{
874 		&alcor_mv,
875 		&clipper_mv,
876 		&dp264_mv,
877 		&eb164_mv,
878 		&eiger_mv,
879 		&lx164_mv,
880 		&miata_mv,
881 		&mikasa_primo_mv,
882 		&monet_mv,
883 		&nautilus_mv,
884 		&noritake_primo_mv,
885 		&pc164_mv,
886 		&privateer_mv,
887 		&rawhide_mv,
888 		&ruffian_mv,
889 		&rx164_mv,
890 		&sable_gamma_mv,
891 		&shark_mv,
892 		&sx164_mv,
893 		&takara_mv,
894 		&webbrick_mv,
895 		&wildfire_mv,
896 		&xlt_mv
897 	};
898 
899 	size_t i;
900 
901 	for (i = 0; i < ARRAY_SIZE(all_vecs); ++i) {
902 		struct alpha_machine_vector *mv = all_vecs[i];
903 		if (strcasecmp(mv->vector_name, name) == 0)
904 			return mv;
905 	}
906 	return NULL;
907 }
908 
909 static void
910 get_sysnames(unsigned long type, unsigned long variation, unsigned long cpu,
911 	     char **type_name, char **variation_name)
912 {
913 	unsigned long member;
914 
915 	/* If not in the tables, make it UNKNOWN,
916 	   else set type name to family */
917 	if (type < ARRAY_SIZE(systype_names)) {
918 		*type_name = systype_names[type];
919 	} else if ((type > ST_API_BIAS) &&
920 		   (type - ST_API_BIAS) < ARRAY_SIZE(api_names)) {
921 		*type_name = api_names[type - ST_API_BIAS];
922 	} else if ((type > ST_UNOFFICIAL_BIAS) &&
923 		   (type - ST_UNOFFICIAL_BIAS) < ARRAY_SIZE(unofficial_names)) {
924 		*type_name = unofficial_names[type - ST_UNOFFICIAL_BIAS];
925 	} else {
926 		*type_name = sys_unknown;
927 		*variation_name = sys_unknown;
928 		return;
929 	}
930 
931 	/* Set variation to "0"; if variation is zero, done.  */
932 	*variation_name = systype_names[0];
933 	if (variation == 0) {
934 		return;
935 	}
936 
937 	member = (variation >> 10) & 0x3f; /* member ID is a bit-field */
938 
939 	cpu &= 0xffffffff; /* make it usable */
940 
941 	switch (type) { /* select by family */
942 	default: /* default to variation "0" for now */
943 		break;
944 	case ST_DEC_EB164:
945 		if (member >= ARRAY_SIZE(eb164_indices))
946 			break;
947 		*variation_name = eb164_names[eb164_indices[member]];
948 		/* PC164 may show as EB164 variation, but with EV56 CPU,
949 		   so, since no true EB164 had anything but EV5... */
950 		if (eb164_indices[member] == 0 && cpu == EV56_CPU)
951 			*variation_name = eb164_names[1]; /* make it PC164 */
952 		break;
953 	case ST_DEC_ALCOR:
954 		if (member < ARRAY_SIZE(alcor_indices))
955 			*variation_name = alcor_names[alcor_indices[member]];
956 		break;
957 	case ST_DEC_MARVEL:
958 		if (member < ARRAY_SIZE(marvel_indices))
959 			*variation_name = marvel_names[marvel_indices[member]];
960 		break;
961 	case ST_DEC_RAWHIDE:
962 		if (member < ARRAY_SIZE(rawhide_indices))
963 			*variation_name = rawhide_names[rawhide_indices[member]];
964 		break;
965 	case ST_DEC_TITAN:
966 		*variation_name = titan_names[0];	/* default */
967 		if (member < ARRAY_SIZE(titan_indices))
968 			*variation_name = titan_names[titan_indices[member]];
969 		break;
970 	case ST_DEC_TSUNAMI:
971 		if (member < ARRAY_SIZE(tsunami_indices))
972 			*variation_name = tsunami_names[tsunami_indices[member]];
973 		break;
974 	}
975 }
976 
977 /*
978  * A change was made to the HWRPB via an ECO and the following code
979  * tracks a part of the ECO.  In HWRPB versions less than 5, the ECO
980  * was not implemented in the console firmware.  If it's revision 5 or
981  * greater we can get the name of the platform as an ASCII string from
982  * the HWRPB.  That's what this function does.  It checks the revision
983  * level and if the string is in the HWRPB it returns the address of
984  * the string--a pointer to the name of the platform.
985  *
986  * Returns:
987  *      - Pointer to an ASCII string if it's in the HWRPB
988  *      - Pointer to a blank string if the data is not in the HWRPB.
989  */
990 
991 static char *
992 platform_string(void)
993 {
994 	struct dsr_struct *dsr;
995 	static char unk_system_string[] = "N/A";
996 
997 	/* Go to the console for the string pointer.
998 	 * If the rpb_vers is not 5 or greater the rpb
999 	 * is old and does not have this data in it.
1000 	 */
1001 	if (hwrpb->revision < 5)
1002 		return (unk_system_string);
1003 	else {
1004 		/* The Dynamic System Recognition struct
1005 		 * has the system platform name starting
1006 		 * after the character count of the string.
1007 		 */
1008 		dsr =  ((struct dsr_struct *)
1009 			((char *)hwrpb + hwrpb->dsr_offset));
1010 		return ((char *)dsr + (dsr->sysname_off +
1011 				       sizeof(long)));
1012 	}
1013 }
1014 
1015 static int
1016 get_nr_processors(struct percpu_struct *cpubase, unsigned long num)
1017 {
1018 	struct percpu_struct *cpu;
1019 	unsigned long i;
1020 	int count = 0;
1021 
1022 	for (i = 0; i < num; i++) {
1023 		cpu = (struct percpu_struct *)
1024 			((char *)cpubase + i*hwrpb->processor_size);
1025 		if ((cpu->flags & 0x1cc) == 0x1cc)
1026 			count++;
1027 	}
1028 	return count;
1029 }
1030 
1031 static void
1032 show_cache_size (struct seq_file *f, const char *which, int shape)
1033 {
1034 	if (shape == -1)
1035 		seq_printf (f, "%s\t\t: n/a\n", which);
1036 	else if (shape == 0)
1037 		seq_printf (f, "%s\t\t: unknown\n", which);
1038 	else
1039 		seq_printf (f, "%s\t\t: %dK, %d-way, %db line\n",
1040 			    which, shape >> 10, shape & 15,
1041 			    1 << ((shape >> 4) & 15));
1042 }
1043 
1044 static int
1045 show_cpuinfo(struct seq_file *f, void *slot)
1046 {
1047 	extern struct unaligned_stat {
1048 		unsigned long count, va, pc;
1049 	} unaligned[2];
1050 
1051 	static char cpu_names[][8] = {
1052 		"EV3", "EV4", "Simulate", "LCA4", "EV5", "EV45", "EV56",
1053 		"EV6", "PCA56", "PCA57", "EV67", "EV68CB", "EV68AL",
1054 		"EV68CX", "EV7", "EV79", "EV69"
1055 	};
1056 
1057 	struct percpu_struct *cpu = slot;
1058 	unsigned int cpu_index;
1059 	char *cpu_name;
1060 	char *systype_name;
1061 	char *sysvariation_name;
1062 	int nr_processors;
1063 	unsigned long timer_freq;
1064 
1065 	cpu_index = (unsigned) (cpu->type - 1);
1066 	cpu_name = "Unknown";
1067 	if (cpu_index < ARRAY_SIZE(cpu_names))
1068 		cpu_name = cpu_names[cpu_index];
1069 
1070 	get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
1071 		     cpu->type, &systype_name, &sysvariation_name);
1072 
1073 	nr_processors = get_nr_processors(cpu, hwrpb->nr_processors);
1074 
1075 #if CONFIG_HZ == 1024 || CONFIG_HZ == 1200
1076 	timer_freq = (100UL * hwrpb->intr_freq) / 4096;
1077 #else
1078 	timer_freq = 100UL * CONFIG_HZ;
1079 #endif
1080 
1081 	seq_printf(f, "cpu\t\t\t: Alpha\n"
1082 		      "cpu model\t\t: %s\n"
1083 		      "cpu variation\t\t: %ld\n"
1084 		      "cpu revision\t\t: %ld\n"
1085 		      "cpu serial number\t: %s\n"
1086 		      "system type\t\t: %s\n"
1087 		      "system variation\t: %s\n"
1088 		      "system revision\t\t: %ld\n"
1089 		      "system serial number\t: %s\n"
1090 		      "cycle frequency [Hz]\t: %lu %s\n"
1091 		      "timer frequency [Hz]\t: %lu.%02lu\n"
1092 		      "page size [bytes]\t: %ld\n"
1093 		      "phys. address bits\t: %ld\n"
1094 		      "max. addr. space #\t: %ld\n"
1095 		      "BogoMIPS\t\t: %lu.%02lu\n"
1096 		      "kernel unaligned acc\t: %ld (pc=%lx,va=%lx)\n"
1097 		      "user unaligned acc\t: %ld (pc=%lx,va=%lx)\n"
1098 		      "platform string\t\t: %s\n"
1099 		      "cpus detected\t\t: %d\n",
1100 		       cpu_name, cpu->variation, cpu->revision,
1101 		       (char*)cpu->serial_no,
1102 		       systype_name, sysvariation_name, hwrpb->sys_revision,
1103 		       (char*)hwrpb->ssn,
1104 		       est_cycle_freq ? : hwrpb->cycle_freq,
1105 		       est_cycle_freq ? "est." : "",
1106 		       timer_freq / 100, timer_freq % 100,
1107 		       hwrpb->pagesize,
1108 		       hwrpb->pa_bits,
1109 		       hwrpb->max_asn,
1110 		       loops_per_jiffy / (500000/HZ),
1111 		       (loops_per_jiffy / (5000/HZ)) % 100,
1112 		       unaligned[0].count, unaligned[0].pc, unaligned[0].va,
1113 		       unaligned[1].count, unaligned[1].pc, unaligned[1].va,
1114 		       platform_string(), nr_processors);
1115 
1116 #ifdef CONFIG_SMP
1117 	seq_printf(f, "cpus active\t\t: %u\n"
1118 		      "cpu active mask\t\t: %016lx\n",
1119 		       num_online_cpus(), cpumask_bits(cpu_possible_mask)[0]);
1120 #endif
1121 
1122 	show_cache_size (f, "L1 Icache", alpha_l1i_cacheshape);
1123 	show_cache_size (f, "L1 Dcache", alpha_l1d_cacheshape);
1124 	show_cache_size (f, "L2 cache", alpha_l2_cacheshape);
1125 	show_cache_size (f, "L3 cache", alpha_l3_cacheshape);
1126 
1127 	return 0;
1128 }
1129 
1130 static int __init
1131 read_mem_block(int *addr, int stride, int size)
1132 {
1133 	long nloads = size / stride, cnt, tmp;
1134 
1135 	__asm__ __volatile__(
1136 	"	rpcc    %0\n"
1137 	"1:	ldl	%3,0(%2)\n"
1138 	"	subq	%1,1,%1\n"
1139 	/* Next two XORs introduce an explicit data dependency between
1140 	   consecutive loads in the loop, which will give us true load
1141 	   latency. */
1142 	"	xor	%3,%2,%2\n"
1143 	"	xor	%3,%2,%2\n"
1144 	"	addq	%2,%4,%2\n"
1145 	"	bne	%1,1b\n"
1146 	"	rpcc	%3\n"
1147 	"	subl	%3,%0,%0\n"
1148 	: "=&r" (cnt), "=&r" (nloads), "=&r" (addr), "=&r" (tmp)
1149 	: "r" (stride), "1" (nloads), "2" (addr));
1150 
1151 	return cnt / (size / stride);
1152 }
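/*
 * The rpcc/subl pair above measures, in the low 32 bits of the cycle
 * counter, the total time spent in the dependent-load loop; dividing
 * by the number of loads (size / stride) yields the average latency
 * of a single load in cycles, which is what the cache probe below
 * compares against.
 */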
1153 
1154 #define CSHAPE(totalsize, linesize, assoc) \
1155   ((totalsize & ~0xff) | (linesize << 4) | assoc)
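/*
 * Worked example of the encoding (decoded by show_cache_size() above):
 * bits 0-3 hold the associativity, bits 4-7 hold log2 of the line
 * size, and the remaining bits hold the total size (its low 8 bits
 * are dropped).  So CSHAPE(8*1024, 5, 1) == 0x2051, which prints as
 * "8K, 1-way, 32b line": 0x2051 >> 10 == 8, 0x2051 & 15 == 1, and
 * 1 << ((0x2051 >> 4) & 15) == 32.
 */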
1156 
1157 /* ??? EV5 supports up to 64M, but did the systems with more than
1158    16M of BCACHE ever exist? */
1159 #define MAX_BCACHE_SIZE	(16*1024*1024)
1160 
1161 /* Note that the offchip caches are direct mapped on all Alphas. */
1162 static int __init
1163 external_cache_probe(int minsize, int width)
1164 {
1165 	int cycles, prev_cycles = 1000000;
1166 	int stride = 1 << width;
1167 	long size = minsize, maxsize = MAX_BCACHE_SIZE * 2;
1168 
1169 	if (maxsize > (max_low_pfn + 1) << PAGE_SHIFT)
1170 		maxsize = 1 << (ilog2(max_low_pfn + 1) + PAGE_SHIFT);
1171 
1172 	/* Get the first block cached. */
1173 	read_mem_block(__va(0), stride, size);
1174 
1175 	while (size < maxsize) {
1176 		/* Get an average load latency in cycles. */
1177 		cycles = read_mem_block(__va(0), stride, size);
1178 		if (cycles > prev_cycles * 2) {
1179 			/* Fine, we exceed the cache. */
1180 			printk("%ldK Bcache detected; load hit latency %d "
1181 			       "cycles, load miss latency %d cycles\n",
1182 			       size >> 11, prev_cycles, cycles);
1183 			return CSHAPE(size >> 1, width, 1);
1184 		}
1185 		/* Try to get the next block cached. */
1186 		read_mem_block(__va(size), stride, size);
1187 		prev_cycles = cycles;
1188 		size <<= 1;
1189 	}
1190 	return -1;	/* No BCACHE found. */
1191 }
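/*
 * Sketch of the probe above: the range [0, size) is first touched so it
 * is resident in the (direct-mapped) Bcache, then timed with dependent
 * loads.  While "size" still fits, the average load latency stays flat;
 * once it exceeds the cache, the loads miss and the latency more than
 * doubles, at which point the previous size (size >> 1) is taken as the
 * Bcache size and encoded via CSHAPE(size >> 1, width, 1).
 */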
1192 
1193 static void __init
1194 determine_cpu_caches (unsigned int cpu_type)
1195 {
1196 	int L1I, L1D, L2, L3;
1197 
1198 	switch (cpu_type) {
1199 	case EV4_CPU:
1200 	case EV45_CPU:
1201 	  {
1202 		if (cpu_type == EV4_CPU)
1203 			L1I = CSHAPE(8*1024, 5, 1);
1204 		else
1205 			L1I = CSHAPE(16*1024, 5, 1);
1206 		L1D = L1I;
1207 		L3 = -1;
1208 
1209 		/* BIU_CTL is a write-only Abox register.  PALcode has a
1210 		   shadow copy, and may be available from some versions
1211 		   of the CSERVE PALcall.  If we can get it, then
1212 
1213 			unsigned long biu_ctl, size;
1214 			size = 128*1024 * (1 << ((biu_ctl >> 28) & 7));
1215 			L2 = CSHAPE (size, 5, 1);
1216 
1217 		   Unfortunately, we can't rely on that.
1218 		*/
1219 		L2 = external_cache_probe(128*1024, 5);
1220 		break;
1221 	  }
1222 
1223 	case LCA4_CPU:
1224 	  {
1225 		unsigned long car, size;
1226 
1227 		L1I = L1D = CSHAPE(8*1024, 5, 1);
1228 		L3 = -1;
1229 
1230 		car = *(vuip) phys_to_virt (0x120000078UL);
1231 		size = 64*1024 * (1 << ((car >> 5) & 7));
1232 		/* No typo -- 8 byte cacheline size.  Whodathunk.  */
1233 		L2 = (car & 1 ? CSHAPE (size, 3, 1) : -1);
1234 		break;
1235 	  }
1236 
1237 	case EV5_CPU:
1238 	case EV56_CPU:
1239 	  {
1240 		unsigned long sc_ctl, width;
1241 
1242 		L1I = L1D = CSHAPE(8*1024, 5, 1);
1243 
1244 		/* Check the line size of the Scache.  */
1245 		sc_ctl = *(vulp) phys_to_virt (0xfffff000a8UL);
1246 		width = sc_ctl & 0x1000 ? 6 : 5;
1247 		L2 = CSHAPE (96*1024, width, 3);
1248 
1249 		/* BC_CONTROL and BC_CONFIG are write-only IPRs.  PALcode
1250 		   has a shadow copy, and may be available from some versions
1251 		   of the CSERVE PALcall.  If we can get it, then
1252 
1253 			unsigned long bc_control, bc_config, size;
1254 			size = 1024*1024 * (1 << ((bc_config & 7) - 1));
1255 			L3 = (bc_control & 1 ? CSHAPE (size, width, 1) : -1);
1256 
1257 		   Unfortunately, we can't rely on that.
1258 		*/
1259 		L3 = external_cache_probe(1024*1024, width);
1260 		break;
1261 	  }
1262 
1263 	case PCA56_CPU:
1264 	case PCA57_CPU:
1265 	  {
1266 		if (cpu_type == PCA56_CPU) {
1267 			L1I = CSHAPE(16*1024, 6, 1);
1268 			L1D = CSHAPE(8*1024, 5, 1);
1269 		} else {
1270 			L1I = CSHAPE(32*1024, 6, 2);
1271 			L1D = CSHAPE(16*1024, 5, 1);
1272 		}
1273 		L3 = -1;
1274 
1275 #if 0
1276 		unsigned long cbox_config, size;
1277 
1278 		cbox_config = *(vulp) phys_to_virt (0xfffff00008UL);
1279 		size = 512*1024 * (1 << ((cbox_config >> 12) & 3));
1280 
1281 		L2 = ((cbox_config >> 31) & 1 ? CSHAPE (size, 6, 1) : -1);
1282 #else
1283 		L2 = external_cache_probe(512*1024, 6);
1284 #endif
1285 		break;
1286 	  }
1287 
1288 	case EV6_CPU:
1289 	case EV67_CPU:
1290 	case EV68CB_CPU:
1291 	case EV68AL_CPU:
1292 	case EV68CX_CPU:
1293 	case EV69_CPU:
1294 		L1I = L1D = CSHAPE(64*1024, 6, 2);
1295 		L2 = external_cache_probe(1024*1024, 6);
1296 		L3 = -1;
1297 		break;
1298 
1299 	case EV7_CPU:
1300 	case EV79_CPU:
1301 		L1I = L1D = CSHAPE(64*1024, 6, 2);
1302 		L2 = CSHAPE(7*1024*1024/4, 6, 7);
1303 		L3 = -1;
1304 		break;
1305 
1306 	default:
1307 		/* Nothing known about this cpu type.  */
1308 		L1I = L1D = L2 = L3 = 0;
1309 		break;
1310 	}
1311 
1312 	alpha_l1i_cacheshape = L1I;
1313 	alpha_l1d_cacheshape = L1D;
1314 	alpha_l2_cacheshape = L2;
1315 	alpha_l3_cacheshape = L3;
1316 }
1317 
1318 /*
1319  * We show only CPU #0 info.
1320  */
1321 static void *
1322 c_start(struct seq_file *f, loff_t *pos)
1323 {
1324 	return *pos ? NULL : (char *)hwrpb + hwrpb->processor_offset;
1325 }
1326 
1327 static void *
1328 c_next(struct seq_file *f, void *v, loff_t *pos)
1329 {
1330 	(*pos)++;
1331 	return NULL;
1332 }
1333 
1334 static void
1335 c_stop(struct seq_file *f, void *v)
1336 {
1337 }
1338 
1339 const struct seq_operations cpuinfo_op = {
1340 	.start	= c_start,
1341 	.next	= c_next,
1342 	.stop	= c_stop,
1343 	.show	= show_cpuinfo,
1344 };
1345 
1346 
1347 static int
1348 alpha_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
1349 {
1350 #if 1
1351 	/* FIXME FIXME FIXME */
1352 	/* If we are using SRM and serial console, just hard halt here. */
1353 	if (alpha_using_srm && srmcons_output)
1354 		__halt();
1355 #endif
1356 	return NOTIFY_DONE;
1357 }
1358 
1359 static __init int add_pcspkr(void)
1360 {
1361 	struct platform_device *pd;
1362 	int ret;
1363 
1364 	pd = platform_device_alloc("pcspkr", -1);
1365 	if (!pd)
1366 		return -ENOMEM;
1367 
1368 	ret = platform_device_add(pd);
1369 	if (ret)
1370 		platform_device_put(pd);
1371 
1372 	return ret;
1373 }
1374 device_initcall(add_pcspkr);
1375