xref: /linux/arch/alpha/kernel/setup.c (revision de2fe5e07d58424bc286fff3fd3c1b0bf933cd58)
1 /*
2  *  linux/arch/alpha/kernel/setup.c
3  *
4  *  Copyright (C) 1995  Linus Torvalds
5  */
6 
7 /* 2.3.x bootmem, 1999 Andrea Arcangeli <andrea@suse.de> */
8 
9 /*
10  * Bootup setup stuff.
11  */
12 
13 #include <linux/sched.h>
14 #include <linux/kernel.h>
15 #include <linux/mm.h>
16 #include <linux/stddef.h>
17 #include <linux/unistd.h>
18 #include <linux/ptrace.h>
19 #include <linux/slab.h>
20 #include <linux/user.h>
21 #include <linux/a.out.h>
22 #include <linux/tty.h>
23 #include <linux/delay.h>
24 #include <linux/config.h>	/* CONFIG_ALPHA_LCA etc */
25 #include <linux/mc146818rtc.h>
26 #include <linux/console.h>
27 #include <linux/errno.h>
28 #include <linux/init.h>
29 #include <linux/string.h>
30 #include <linux/ioport.h>
31 #include <linux/bootmem.h>
32 #include <linux/pci.h>
33 #include <linux/seq_file.h>
34 #include <linux/root_dev.h>
35 #include <linux/initrd.h>
36 #include <linux/eisa.h>
37 #include <linux/pfn.h>
38 #ifdef CONFIG_MAGIC_SYSRQ
39 #include <linux/sysrq.h>
40 #include <linux/reboot.h>
41 #endif
42 #include <linux/notifier.h>
43 #include <asm/setup.h>
44 #include <asm/io.h>
45 
46 extern struct atomic_notifier_head panic_notifier_list;
47 static int alpha_panic_event(struct notifier_block *, unsigned long, void *);
48 static struct notifier_block alpha_panic_block = {
49 	.notifier_call	= alpha_panic_event,
50 	.next		= NULL,
51 	.priority	= INT_MAX, /* try to do it first */
52 };
53 
54 #include <asm/uaccess.h>
55 #include <asm/pgtable.h>
56 #include <asm/system.h>
57 #include <asm/hwrpb.h>
58 #include <asm/dma.h>
59 #include <asm/io.h>
60 #include <asm/mmu_context.h>
61 #include <asm/console.h>
62 
63 #include "proto.h"
64 #include "pci_impl.h"
65 
66 
67 struct hwrpb_struct *hwrpb;
68 unsigned long srm_hae;
69 
70 int alpha_l1i_cacheshape;
71 int alpha_l1d_cacheshape;
72 int alpha_l2_cacheshape;
73 int alpha_l3_cacheshape;
74 
75 #ifdef CONFIG_VERBOSE_MCHECK
76 /* 0=minimum, 1=verbose, 2=all */
77 /* This can be overridden via the command line, e.g. "verbose_mcheck=2". */
78 unsigned long alpha_verbose_mcheck = CONFIG_VERBOSE_MCHECK_ON;
79 #endif
80 
81 /* Which processor we booted from.  */
82 int boot_cpuid;
83 
84 /*
85  * Using SRM callbacks for initial console output. This works from
86  * setup_arch() time through the end of time_init(), as those places
87  * are under our (Alpha) control.
88  *
89  * "srmcons" specified in the boot command arguments allows us to
90  * see kernel messages during the period of time before the true
91  * console device is "registered" during console_init().
92  * As of this version (2.5.59), console_init() will call
93  * disable_early_printk() as the last action before initializing
94  * the console drivers. That's the last possible time srmcons can be
95  * unregistered without interfering with console behavior.
96  *
97  * By default, OFF; set it with a boot command arg of "srmcons" or
98  * "console=srm". The meaning of these two args is:
99  *     "srmcons"     - early callback prints
100  *     "console=srm" - full callback based console, including early prints
101  */
102 int srmcons_output = 0;
103 
104 /* Enforce a memory size limit; useful for testing. By default, none. */
105 unsigned long mem_size_limit = 0;
106 
107 /* Set AGP GART window size (0 means disabled). */
108 unsigned long alpha_agpgart_size = DEFAULT_AGP_APER_SIZE;
109 
110 #ifdef CONFIG_ALPHA_GENERIC
111 struct alpha_machine_vector alpha_mv;
112 int alpha_using_srm;
113 #endif
114 
115 #define N(a) (sizeof(a)/sizeof(a[0]))
116 
117 static struct alpha_machine_vector *get_sysvec(unsigned long, unsigned long,
118 					       unsigned long);
119 static struct alpha_machine_vector *get_sysvec_byname(const char *);
120 static void get_sysnames(unsigned long, unsigned long, unsigned long,
121 			 char **, char **);
122 static void determine_cpu_caches (unsigned int);
123 
124 static char command_line[COMMAND_LINE_SIZE];
125 
126 /*
127  * The format of "screen_info" is strange, and due to early
128  * i386-setup code. This is just enough to make the console
129  * code think we're on a VGA color display.
130  */
131 
132 struct screen_info screen_info = {
133 	.orig_x = 0,
134 	.orig_y = 25,
135 	.orig_video_cols = 80,
136 	.orig_video_lines = 25,
137 	.orig_video_isVGA = 1,
138 	.orig_video_points = 16
139 };
140 
141 /*
142  * The direct map I/O window, if any.  This should be the same
143  * for all busses, since it's used by virt_to_bus.
144  */
145 
146 unsigned long __direct_map_base;
147 unsigned long __direct_map_size;
148 
149 /*
150  * Declare all of the machine vectors.
151  */
152 
153 /* GCC 2.7.2 (on alpha at least) is lame.  It does not support either
154    __attribute__((weak)) or #pragma weak.  Bypass it and talk directly
155    to the assembler.  */
156 
157 #define WEAK(X) \
158 	extern struct alpha_machine_vector X; \
159 	asm(".weak "#X)
160 
161 WEAK(alcor_mv);
162 WEAK(alphabook1_mv);
163 WEAK(avanti_mv);
164 WEAK(cabriolet_mv);
165 WEAK(clipper_mv);
166 WEAK(dp264_mv);
167 WEAK(eb164_mv);
168 WEAK(eb64p_mv);
169 WEAK(eb66_mv);
170 WEAK(eb66p_mv);
171 WEAK(eiger_mv);
172 WEAK(jensen_mv);
173 WEAK(lx164_mv);
174 WEAK(lynx_mv);
175 WEAK(marvel_ev7_mv);
176 WEAK(miata_mv);
177 WEAK(mikasa_mv);
178 WEAK(mikasa_primo_mv);
179 WEAK(monet_mv);
180 WEAK(nautilus_mv);
181 WEAK(noname_mv);
182 WEAK(noritake_mv);
183 WEAK(noritake_primo_mv);
184 WEAK(p2k_mv);
185 WEAK(pc164_mv);
186 WEAK(privateer_mv);
187 WEAK(rawhide_mv);
188 WEAK(ruffian_mv);
189 WEAK(rx164_mv);
190 WEAK(sable_mv);
191 WEAK(sable_gamma_mv);
192 WEAK(shark_mv);
193 WEAK(sx164_mv);
194 WEAK(takara_mv);
195 WEAK(titan_mv);
196 WEAK(webbrick_mv);
197 WEAK(wildfire_mv);
198 WEAK(xl_mv);
199 WEAK(xlt_mv);
200 
201 #undef WEAK
202 
203 /*
204  * I/O resources inherited from PeeCees.  Except for perhaps the
205  * turbochannel alphas, everyone has these on some sort of SuperIO chip.
206  *
207  * ??? If this becomes less standard, move the struct out into the
208  * machine vector.
209  */
210 
211 static void __init
212 reserve_std_resources(void)
213 {
214 	static struct resource standard_io_resources[] = {
215 		{ .name = "rtc", .start = -1, .end = -1 },
216 		{ .name = "dma1", .start = 0x00, .end = 0x1f },
217 		{ .name = "pic1", .start = 0x20, .end = 0x3f },
218 		{ .name = "timer", .start = 0x40, .end = 0x5f },
219 		{ .name = "keyboard", .start = 0x60, .end = 0x6f },
220 		{ .name = "dma page reg", .start = 0x80, .end = 0x8f },
221 		{ .name = "pic2", .start = 0xa0, .end = 0xbf },
222 		{ .name = "dma2", .start = 0xc0, .end = 0xdf },
223 	};
224 
225 	struct resource *io = &ioport_resource;
226 	size_t i;
227 
228 	if (hose_head) {
229 		struct pci_controller *hose;
230 		for (hose = hose_head; hose; hose = hose->next)
231 			if (hose->index == 0) {
232 				io = hose->io_space;
233 				break;
234 			}
235 	}
236 
237 	/* Fix up for the Jensen's odd RTC placement.  */
238 	standard_io_resources[0].start = RTC_PORT(0);
239 	standard_io_resources[0].end = RTC_PORT(0) + 0x10;
240 
241 	for (i = 0; i < N(standard_io_resources); ++i)
242 		request_resource(io, standard_io_resources+i);
243 }
244 
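/* PFN_MAX is the page frame number of the 2GB boundary.  for_each_mem_cluster
   walks the memory cluster descriptors in the HWRPB's memory data descriptor
   table (MDDT). */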
245 #define PFN_MAX		PFN_DOWN(0x80000000)
246 #define for_each_mem_cluster(memdesc, cluster, i)		\
247 	for ((cluster) = (memdesc)->cluster, (i) = 0;		\
248 	     (i) < (memdesc)->numclusters; (i)++, (cluster)++)
249 
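/* Parse a size argument such as "mem=512M": accept an optional K/M/G suffix
   and return the corresponding limit as a page frame number. */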
250 static unsigned long __init
251 get_mem_size_limit(char *s)
252 {
253 	unsigned long end = 0;
254 	char *from = s;
255 
256 	end = simple_strtoul(from, &from, 0);
257 	if (*from == 'K' || *from == 'k') {
258 		end = end << 10;
259 		from++;
260 	} else if (*from == 'M' || *from == 'm') {
261 		end = end << 20;
262 		from++;
263 	} else if (*from == 'G' || *from == 'g') {
264 		end = end << 30;
265 		from++;
266 	}
267 	return end >> PAGE_SHIFT; /* Return the PFN of the limit. */
268 }
269 
270 #ifdef CONFIG_BLK_DEV_INITRD
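/* Relocate the initrd below mem_limit using a bootmem allocation; on
   failure the initrd is disabled and NULL is returned. */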
271 void * __init
272 move_initrd(unsigned long mem_limit)
273 {
274 	void *start;
275 	unsigned long size;
276 
277 	size = initrd_end - initrd_start;
278 	start = __alloc_bootmem(PAGE_ALIGN(size), PAGE_SIZE, 0);
279 	if (!start || __pa(start) + size > mem_limit) {
280 		initrd_start = initrd_end = 0;
281 		return NULL;
282 	}
283 	memmove(start, (void *)initrd_start, size);
284 	initrd_start = (unsigned long)start;
285 	initrd_end = initrd_start + size;
286 	printk("initrd moved to %p\n", start);
287 	return start;
288 }
289 #endif
290 
291 #ifndef CONFIG_DISCONTIGMEM
292 static void __init
293 setup_memory(void *kernel_end)
294 {
295 	struct memclust_struct * cluster;
296 	struct memdesc_struct * memdesc;
297 	unsigned long start_kernel_pfn, end_kernel_pfn;
298 	unsigned long bootmap_size, bootmap_pages, bootmap_start;
299 	unsigned long start, end;
300 	unsigned long i;
301 
302 	/* Find free clusters, and init and free the bootmem accordingly.  */
303 	memdesc = (struct memdesc_struct *)
304 	  (hwrpb->mddt_offset + (unsigned long) hwrpb);
305 
306 	for_each_mem_cluster(memdesc, cluster, i) {
307 		printk("memcluster %lu, usage %01lx, start %8lu, end %8lu\n",
308 		       i, cluster->usage, cluster->start_pfn,
309 		       cluster->start_pfn + cluster->numpages);
310 
311 		/* Bit 0 is console/PALcode reserved.  Bit 1 is
312 		   non-volatile memory -- we might want to mark
313 		   this for later.  */
314 		if (cluster->usage & 3)
315 			continue;
316 
317 		end = cluster->start_pfn + cluster->numpages;
318 		if (end > max_low_pfn)
319 			max_low_pfn = end;
320 	}
321 
322 	/*
323 	 * Except for the NUMA systems (wildfire, marvel) all of the
324 	 * Alpha systems we run on support 32GB of memory or less.
325 	 * Since the NUMA systems introduce large holes in memory addressing,
326 	 * we can get into a situation where there is not enough contiguous
327 	 * memory for the memory map.
328 	 *
329 	 * Limit memory to the first 32GB to limit the NUMA systems to
330 	 * memory on their first node (wildfire) or 2 (marvel) to avoid
331 	 * not being able to produce the memory map. In order to access
332 	 * all of the memory on the NUMA systems, build with discontiguous
333 	 * memory support.
334 	 *
335 	 * If the user specified a memory limit, let that memory limit stand.
336 	 */
337 	if (!mem_size_limit)
338 		mem_size_limit = (32ul * 1024 * 1024 * 1024) >> PAGE_SHIFT;
339 
340 	if (mem_size_limit && max_low_pfn >= mem_size_limit)
341 	{
342 		printk("setup: forcing memory size to %ldK (from %ldK).\n",
343 		       mem_size_limit << (PAGE_SHIFT - 10),
344 		       max_low_pfn    << (PAGE_SHIFT - 10));
345 		max_low_pfn = mem_size_limit;
346 	}
347 
348 	/* Find the bounds of kernel memory.  */
349 	start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS);
350 	end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end));
351 	bootmap_start = -1;
352 
353  try_again:
354 	if (max_low_pfn <= end_kernel_pfn)
355 		panic("not enough memory to boot");
356 
357 	/* We need to know how many physically contiguous pages
358 	   we'll need for the bootmap.  */
359 	bootmap_pages = bootmem_bootmap_pages(max_low_pfn);
360 
361 	/* Now find a good region where to allocate the bootmap.  */
362 	for_each_mem_cluster(memdesc, cluster, i) {
363 		if (cluster->usage & 3)
364 			continue;
365 
366 		start = cluster->start_pfn;
367 		end = start + cluster->numpages;
368 		if (start >= max_low_pfn)
369 			continue;
370 		if (end > max_low_pfn)
371 			end = max_low_pfn;
372 		if (start < start_kernel_pfn) {
373 			if (end > end_kernel_pfn
374 			    && end - end_kernel_pfn >= bootmap_pages) {
375 				bootmap_start = end_kernel_pfn;
376 				break;
377 			} else if (end > start_kernel_pfn)
378 				end = start_kernel_pfn;
379 		} else if (start < end_kernel_pfn)
380 			start = end_kernel_pfn;
381 		if (end - start >= bootmap_pages) {
382 			bootmap_start = start;
383 			break;
384 		}
385 	}
386 
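	/* No free cluster below max_low_pfn was large enough to hold the
	   bootmap; halve the memory limit and try again. */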
387 	if (bootmap_start == ~0UL) {
388 		max_low_pfn >>= 1;
389 		goto try_again;
390 	}
391 
392 	/* Allocate the bootmap and mark the whole MM as reserved.  */
393 	bootmap_size = init_bootmem(bootmap_start, max_low_pfn);
394 
395 	/* Mark the free regions.  */
396 	for_each_mem_cluster(memdesc, cluster, i) {
397 		if (cluster->usage & 3)
398 			continue;
399 
400 		start = cluster->start_pfn;
401 		end = cluster->start_pfn + cluster->numpages;
402 		if (start >= max_low_pfn)
403 			continue;
404 		if (end > max_low_pfn)
405 			end = max_low_pfn;
406 		if (start < start_kernel_pfn) {
407 			if (end > end_kernel_pfn) {
408 				free_bootmem(PFN_PHYS(start),
409 					     (PFN_PHYS(start_kernel_pfn)
410 					      - PFN_PHYS(start)));
411 				printk("freeing pages %ld:%ld\n",
412 				       start, start_kernel_pfn);
413 				start = end_kernel_pfn;
414 			} else if (end > start_kernel_pfn)
415 				end = start_kernel_pfn;
416 		} else if (start < end_kernel_pfn)
417 			start = end_kernel_pfn;
418 		if (start >= end)
419 			continue;
420 
421 		free_bootmem(PFN_PHYS(start), PFN_PHYS(end) - PFN_PHYS(start));
422 		printk("freeing pages %ld:%ld\n", start, end);
423 	}
424 
425 	/* Reserve the bootmap memory.  */
426 	reserve_bootmem(PFN_PHYS(bootmap_start), bootmap_size);
427 	printk("reserving pages %ld:%ld\n", bootmap_start, bootmap_start+PFN_UP(bootmap_size));
428 
429 #ifdef CONFIG_BLK_DEV_INITRD
430 	initrd_start = INITRD_START;
431 	if (initrd_start) {
432 		initrd_end = initrd_start+INITRD_SIZE;
433 		printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
434 		       (void *) initrd_start, INITRD_SIZE);
435 
436 		if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) {
437 			if (!move_initrd(PFN_PHYS(max_low_pfn)))
438 				printk("initrd extends beyond end of memory "
439 				       "(0x%08lx > 0x%p)\ndisabling initrd\n",
440 				       initrd_end,
441 				       phys_to_virt(PFN_PHYS(max_low_pfn)));
442 		} else {
443 			reserve_bootmem(virt_to_phys((void *)initrd_start),
444 					INITRD_SIZE);
445 		}
446 	}
447 #endif /* CONFIG_BLK_DEV_INITRD */
448 }
449 #else
450 extern void setup_memory(void *);
451 #endif /* !CONFIG_DISCONTIGMEM */
452 
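/* A PFN is RAM if it falls in a memory cluster that is neither reserved for
   the console/PALcode nor marked non-volatile (usage bits 0 and 1 clear). */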
453 int __init
454 page_is_ram(unsigned long pfn)
455 {
456 	struct memclust_struct * cluster;
457 	struct memdesc_struct * memdesc;
458 	unsigned long i;
459 
460 	memdesc = (struct memdesc_struct *)
461 		(hwrpb->mddt_offset + (unsigned long) hwrpb);
462 	for_each_mem_cluster(memdesc, cluster, i)
463 	{
464 		if (pfn >= cluster->start_pfn  &&
465 		    pfn < cluster->start_pfn + cluster->numpages) {
466 			return (cluster->usage & 3) ? 0 : 1;
467 		}
468 	}
469 
470 	return 0;
471 }
472 
473 void __init
474 setup_arch(char **cmdline_p)
475 {
476 	extern char _end[];
477 
478 	struct alpha_machine_vector *vec = NULL;
479 	struct percpu_struct *cpu;
480 	char *type_name, *var_name, *p;
481 	void *kernel_end = _end; /* end of kernel */
482 	char *args = command_line;
483 
484 	hwrpb = (struct hwrpb_struct*) __va(INIT_HWRPB->phys_addr);
485 	boot_cpuid = hard_smp_processor_id();
486 
487 	/*
488 	 * Pre-process the system type to make sure it will be valid.
489 	 *
490 	 * This may restore real CABRIO and EB66+ family names, i.e.
491 	 * EB64+ and EB66.
492 	 *
493 	 * Oh, and "white box" AS800 (aka DIGITAL Server 3000 series)
494 	 * and AS1200 (DIGITAL Server 5000 series) have the type as
495 	 * the negative of the real one.
496 	 */
497 	if ((long)hwrpb->sys_type < 0) {
498 		hwrpb->sys_type = -((long)hwrpb->sys_type);
499 		hwrpb_update_checksum(hwrpb);
500 	}
501 
502 	/* Register a call for panic conditions. */
503 	atomic_notifier_chain_register(&panic_notifier_list,
504 			&alpha_panic_block);
505 
506 #ifdef CONFIG_ALPHA_GENERIC
507 	/* Assume that we've booted from SRM if we haven't booted from MILO.
508 	   Detect the latter by looking for "MILO" in the system serial number.  */
509 	alpha_using_srm = strncmp((const char *)hwrpb->ssn, "MILO", 4) != 0;
510 #endif
511 
512 	/* If we are using SRM, we want to allow callbacks as early as
513 	   possible, so do this NOW; they should work immediately
514 	   thereafter.  */
516 	kernel_end = callback_init(kernel_end);
517 
518 	/*
519 	 * Locate the command line.
520 	 */
521 	/* Hack for Jensen... since we're restricted to 8 or 16 chars for
522 	   boot flags depending on the boot mode, we need some shorthand.
523 	   This should do for installation.  */
524 	if (strcmp(COMMAND_LINE, "INSTALL") == 0) {
525 		strlcpy(command_line, "root=/dev/fd0 load_ramdisk=1", sizeof command_line);
526 	} else {
527 		strlcpy(command_line, COMMAND_LINE, sizeof command_line);
528 	}
529 	strcpy(saved_command_line, command_line);
530 	*cmdline_p = command_line;
531 
532 	/*
533 	 * Process command-line arguments.
534 	 */
535 	while ((p = strsep(&args, " \t")) != NULL) {
536 		if (!*p) continue;
537 		if (strncmp(p, "alpha_mv=", 9) == 0) {
538 			vec = get_sysvec_byname(p+9);
539 			continue;
540 		}
541 		if (strncmp(p, "cycle=", 6) == 0) {
542 			est_cycle_freq = simple_strtol(p+6, NULL, 0);
543 			continue;
544 		}
545 		if (strncmp(p, "mem=", 4) == 0) {
546 			mem_size_limit = get_mem_size_limit(p+4);
547 			continue;
548 		}
549 		if (strncmp(p, "srmcons", 7) == 0) {
550 			srmcons_output |= 1;
551 			continue;
552 		}
553 		if (strncmp(p, "console=srm", 11) == 0) {
554 			srmcons_output |= 2;
555 			continue;
556 		}
557 		if (strncmp(p, "gartsize=", 9) == 0) {
558 			alpha_agpgart_size =
559 				get_mem_size_limit(p+9) << PAGE_SHIFT;
560 			continue;
561 		}
562 #ifdef CONFIG_VERBOSE_MCHECK
563 		if (strncmp(p, "verbose_mcheck=", 15) == 0) {
564 			alpha_verbose_mcheck = simple_strtol(p+15, NULL, 0);
565 			continue;
566 		}
567 #endif
568 	}
569 
570 	/* Replace the command line, now that we've killed it with strsep.  */
571 	strcpy(command_line, saved_command_line);
572 
573 	/* If we want SRM console printk echoing early, do it now. */
574 	if (alpha_using_srm && srmcons_output) {
575 		register_srm_console();
576 
577 		/*
578 		 * If "console=srm" was specified, clear the srmcons_output
579 		 * flag now so that time.c won't unregister_srm_console
580 		 */
581 		if (srmcons_output & 2)
582 			srmcons_output = 0;
583 	}
584 
585 #ifdef CONFIG_MAGIC_SYSRQ
586 	/* If we're using SRM, make sysrq-b halt back to the prom,
587 	   not auto-reboot.  */
588 	if (alpha_using_srm) {
589 		struct sysrq_key_op *op = __sysrq_get_key_op('b');
590 		op->handler = (void *) machine_halt;
591 	}
592 #endif
593 
594 	/*
595 	 * Identify and reconfigure for the current system.
596 	 */
597 	cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);
598 
599 	get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
600 		     cpu->type, &type_name, &var_name);
601 	if (*var_name == '0')
602 		var_name = "";
603 
604 	if (!vec) {
605 		vec = get_sysvec(hwrpb->sys_type, hwrpb->sys_variation,
606 				 cpu->type);
607 	}
608 
609 	if (!vec) {
610 		panic("Unsupported system type: %s%s%s (%ld %ld)\n",
611 		      type_name, (*var_name ? " variation " : ""), var_name,
612 		      hwrpb->sys_type, hwrpb->sys_variation);
613 	}
614 	if (vec != &alpha_mv) {
615 		alpha_mv = *vec;
616 	}
617 
618 	printk("Booting "
619 #ifdef CONFIG_ALPHA_GENERIC
620 	       "GENERIC "
621 #endif
622 	       "on %s%s%s using machine vector %s from %s\n",
623 	       type_name, (*var_name ? " variation " : ""),
624 	       var_name, alpha_mv.vector_name,
625 	       (alpha_using_srm ? "SRM" : "MILO"));
626 
627 	printk("Major Options: "
628 #ifdef CONFIG_SMP
629 	       "SMP "
630 #endif
631 #ifdef CONFIG_ALPHA_EV56
632 	       "EV56 "
633 #endif
634 #ifdef CONFIG_ALPHA_EV67
635 	       "EV67 "
636 #endif
637 #ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
638 	       "LEGACY_START "
639 #endif
640 #ifdef CONFIG_VERBOSE_MCHECK
641 	       "VERBOSE_MCHECK "
642 #endif
643 
644 #ifdef CONFIG_DISCONTIGMEM
645 	       "DISCONTIGMEM "
646 #ifdef CONFIG_NUMA
647 	       "NUMA "
648 #endif
649 #endif
650 
651 #ifdef CONFIG_DEBUG_SPINLOCK
652 	       "DEBUG_SPINLOCK "
653 #endif
654 #ifdef CONFIG_MAGIC_SYSRQ
655 	       "MAGIC_SYSRQ "
656 #endif
657 	       "\n");
658 
659 	printk("Command line: %s\n", command_line);
660 
661 	/*
662 	 * Sync up the HAE.
663 	 * Save the SRM's current value for restoration.
664 	 */
665 	srm_hae = *alpha_mv.hae_register;
666 	__set_hae(alpha_mv.hae_cache);
667 
668 	/* Reset enable correctable error reports.  */
669 	wrmces(0x7);
670 
671 	/* Find our memory.  */
672 	setup_memory(kernel_end);
673 
674 	/* First guess at cpu cache sizes.  Do this before init_arch.  */
675 	determine_cpu_caches(cpu->type);
676 
677 	/* Initialize the machine.  Usually has to do with setting up
678 	   DMA windows and the like.  */
679 	if (alpha_mv.init_arch)
680 		alpha_mv.init_arch();
681 
682 	/* Reserve standard resources.  */
683 	reserve_std_resources();
684 
685 	/*
686 	 * Give us a default console.  TGA users will see nothing until
687 	 * chr_dev_init is called, rather late in the boot sequence.
688 	 */
689 
690 #ifdef CONFIG_VT
691 #if defined(CONFIG_VGA_CONSOLE)
692 	conswitchp = &vga_con;
693 #elif defined(CONFIG_DUMMY_CONSOLE)
694 	conswitchp = &dummy_con;
695 #endif
696 #endif
697 
698 	/* Default root filesystem to sda2.  */
699 	ROOT_DEV = Root_SDA2;
700 
701 #ifdef CONFIG_EISA
702 	/* FIXME:  only set this when we actually have EISA in this box? */
703 	EISA_bus = 1;
704 #endif
705 
706 	/*
707 	 * Check ASN in HWRPB for validity, report if bad.
708 	 * FIXME: how was this failing?  Should we trust it instead,
709 	 * and copy the value into alpha_mv.max_asn?
710 	 */
711 
712 	if (hwrpb->max_asn != MAX_ASN) {
713 		printk("Max ASN from HWRPB is bad (0x%lx)\n", hwrpb->max_asn);
714 	}
715 
716 	/*
717 	 * Identify the flock of penguins.
718 	 */
719 
720 #ifdef CONFIG_SMP
721 	setup_smp();
722 #endif
723 	paging_init();
724 }
725 
726 void __init
727 disable_early_printk(void)
728 {
729 	if (alpha_using_srm && srmcons_output) {
730 		unregister_srm_console();
731 		srmcons_output = 0;
732 	}
733 }
734 
735 static char sys_unknown[] = "Unknown";
736 static char systype_names[][16] = {
737 	"0",
738 	"ADU", "Cobra", "Ruby", "Flamingo", "Mannequin", "Jensen",
739 	"Pelican", "Morgan", "Sable", "Medulla", "Noname",
740 	"Turbolaser", "Avanti", "Mustang", "Alcor", "Tradewind",
741 	"Mikasa", "EB64", "EB66", "EB64+", "AlphaBook1",
742 	"Rawhide", "K2", "Lynx", "XL", "EB164", "Noritake",
743 	"Cortex", "29", "Miata", "XXM", "Takara", "Yukon",
744 	"Tsunami", "Wildfire", "CUSCO", "Eiger", "Titan", "Marvel"
745 };
746 
747 static char unofficial_names[][8] = {"100", "Ruffian"};
748 
749 static char api_names[][16] = {"200", "Nautilus"};
750 
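/* For each system family below, the *_indices[] table maps the HWRPB
   variation "member ID" to an entry in the corresponding names/vectors table. */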
751 static char eb164_names[][8] = {"EB164", "PC164", "LX164", "SX164", "RX164"};
752 static int eb164_indices[] = {0,0,0,1,1,1,1,1,2,2,2,2,3,3,3,3,4};
753 
754 static char alcor_names[][16] = {"Alcor", "Maverick", "Bret"};
755 static int alcor_indices[] = {0,0,0,1,1,1,0,0,0,0,0,0,2,2,2,2,2,2};
756 
757 static char eb64p_names[][16] = {"EB64+", "Cabriolet", "AlphaPCI64"};
758 static int eb64p_indices[] = {0,0,1,2};
759 
760 static char eb66_names[][8] = {"EB66", "EB66+"};
761 static int eb66_indices[] = {0,0,1};
762 
763 static char marvel_names[][16] = {
764 	"Marvel/EV7"
765 };
766 static int marvel_indices[] = { 0 };
767 
768 static char rawhide_names[][16] = {
769 	"Dodge", "Wrangler", "Durango", "Tincup", "DaVinci"
770 };
771 static int rawhide_indices[] = {0,0,0,1,1,2,2,3,3,4,4};
772 
773 static char titan_names[][16] = {
774 	"DEFAULT", "Privateer", "Falcon", "Granite"
775 };
776 static int titan_indices[] = {0,1,2,2,3};
777 
778 static char tsunami_names[][16] = {
779 	"0", "DP264", "Warhol", "Windjammer", "Monet", "Clipper",
780 	"Goldrush", "Webbrick", "Catamaran", "Brisbane", "Melbourne",
781 	"Flying Clipper", "Shark"
782 };
783 static int tsunami_indices[] = {0,1,2,3,4,5,6,7,8,9,10,11,12};
784 
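/* Map the HWRPB system type, variation and CPU type to a machine vector,
   using the per-family variation tables above and the vector tables below. */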
785 static struct alpha_machine_vector * __init
786 get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu)
787 {
788 	static struct alpha_machine_vector *systype_vecs[] __initdata =
789 	{
790 		NULL,		/* 0 */
791 		NULL,		/* ADU */
792 		NULL,		/* Cobra */
793 		NULL,		/* Ruby */
794 		NULL,		/* Flamingo */
795 		NULL,		/* Mannequin */
796 		&jensen_mv,
797 		NULL, 		/* Pelican */
798 		NULL,		/* Morgan */
799 		NULL,		/* Sable -- see below.  */
800 		NULL,		/* Medulla */
801 		&noname_mv,
802 		NULL,		/* Turbolaser */
803 		&avanti_mv,
804 		NULL,		/* Mustang */
805 		NULL,		/* Alcor, Bret, Maverick. HWRPB inaccurate? */
806 		NULL,		/* Tradewind */
807 		NULL,		/* Mikasa -- see below.  */
808 		NULL,		/* EB64 */
809 		NULL,		/* EB66 -- see variation.  */
810 		NULL,		/* EB64+ -- see variation.  */
811 		&alphabook1_mv,
812 		&rawhide_mv,
813 		NULL,		/* K2 */
814 		&lynx_mv,	/* Lynx */
815 		&xl_mv,
816 		NULL,		/* EB164 -- see variation.  */
817 		NULL,		/* Noritake -- see below.  */
818 		NULL,		/* Cortex */
819 		NULL,		/* 29 */
820 		&miata_mv,
821 		NULL,		/* XXM */
822 		&takara_mv,
823 		NULL,		/* Yukon */
824 		NULL,		/* Tsunami -- see variation.  */
825 		&wildfire_mv,	/* Wildfire */
826 		NULL,		/* CUSCO */
827 		&eiger_mv,	/* Eiger */
828 		NULL,		/* Titan */
829 		NULL,		/* Marvel */
830 	};
831 
832 	static struct alpha_machine_vector *unofficial_vecs[] __initdata =
833 	{
834 		NULL,		/* 100 */
835 		&ruffian_mv,
836 	};
837 
838 	static struct alpha_machine_vector *api_vecs[] __initdata =
839 	{
840 		NULL,		/* 200 */
841 		&nautilus_mv,
842 	};
843 
844 	static struct alpha_machine_vector *alcor_vecs[] __initdata =
845 	{
846 		&alcor_mv, &xlt_mv, &xlt_mv
847 	};
848 
849 	static struct alpha_machine_vector *eb164_vecs[] __initdata =
850 	{
851 		&eb164_mv, &pc164_mv, &lx164_mv, &sx164_mv, &rx164_mv
852 	};
853 
854 	static struct alpha_machine_vector *eb64p_vecs[] __initdata =
855 	{
856 		&eb64p_mv,
857 		&cabriolet_mv,
858 		&cabriolet_mv		/* AlphaPCI64 */
859 	};
860 
861 	static struct alpha_machine_vector *eb66_vecs[] __initdata =
862 	{
863 		&eb66_mv,
864 		&eb66p_mv
865 	};
866 
867 	static struct alpha_machine_vector *marvel_vecs[] __initdata =
868 	{
869 		&marvel_ev7_mv,
870 	};
871 
872 	static struct alpha_machine_vector *titan_vecs[] __initdata =
873 	{
874 		&titan_mv,		/* default   */
875 		&privateer_mv,		/* privateer */
876 		&titan_mv,		/* falcon    */
877 		&privateer_mv,		/* granite   */
878 	};
879 
880 	static struct alpha_machine_vector *tsunami_vecs[]  __initdata =
881 	{
882 		NULL,
883 		&dp264_mv,		/* dp264 */
884 		&dp264_mv,		/* warhol */
885 		&dp264_mv,		/* windjammer */
886 		&monet_mv,		/* monet */
887 		&clipper_mv,		/* clipper */
888 		&dp264_mv,		/* goldrush */
889 		&webbrick_mv,		/* webbrick */
890 		&dp264_mv,		/* catamaran */
891 		NULL,			/* brisbane? */
892 		NULL,			/* melbourne? */
893 		NULL,			/* flying clipper? */
894 		&shark_mv,		/* shark */
895 	};
896 
897 	/* ??? Do we need to distinguish between Rawhides?  */
898 
899 	struct alpha_machine_vector *vec;
900 
901 	/* Search the system tables first... */
902 	vec = NULL;
903 	if (type < N(systype_vecs)) {
904 		vec = systype_vecs[type];
905 	} else if ((type > ST_API_BIAS) &&
906 		   (type - ST_API_BIAS) < N(api_vecs)) {
907 		vec = api_vecs[type - ST_API_BIAS];
908 	} else if ((type > ST_UNOFFICIAL_BIAS) &&
909 		   (type - ST_UNOFFICIAL_BIAS) < N(unofficial_vecs)) {
910 		vec = unofficial_vecs[type - ST_UNOFFICIAL_BIAS];
911 	}
912 
913 	/* If we've not found one, try for a variation.  */
914 
915 	if (!vec) {
916 		/* Member ID is a bit-field. */
917 		unsigned long member = (variation >> 10) & 0x3f;
918 
919 		cpu &= 0xffffffff; /* make it usable */
920 
921 		switch (type) {
922 		case ST_DEC_ALCOR:
923 			if (member < N(alcor_indices))
924 				vec = alcor_vecs[alcor_indices[member]];
925 			break;
926 		case ST_DEC_EB164:
927 			if (member < N(eb164_indices))
928 				vec = eb164_vecs[eb164_indices[member]];
929 			/* PC164 may show up as an EB164 variation with an
930 			   EV56 CPU, but no true EB164 ever had anything but
931 			   an EV5, so treat it as a PC164.  */
931 			if (vec == &eb164_mv && cpu == EV56_CPU)
932 				vec = &pc164_mv;
933 			break;
934 		case ST_DEC_EB64P:
935 			if (member < N(eb64p_indices))
936 				vec = eb64p_vecs[eb64p_indices[member]];
937 			break;
938 		case ST_DEC_EB66:
939 			if (member < N(eb66_indices))
940 				vec = eb66_vecs[eb66_indices[member]];
941 			break;
942 		case ST_DEC_MARVEL:
943 			if (member < N(marvel_indices))
944 				vec = marvel_vecs[marvel_indices[member]];
945 			break;
946 		case ST_DEC_TITAN:
947 			vec = titan_vecs[0];	/* default */
948 			if (member < N(titan_indices))
949 				vec = titan_vecs[titan_indices[member]];
950 			break;
951 		case ST_DEC_TSUNAMI:
952 			if (member < N(tsunami_indices))
953 				vec = tsunami_vecs[tsunami_indices[member]];
954 			break;
955 		case ST_DEC_1000:
956 			if (cpu == EV5_CPU || cpu == EV56_CPU)
957 				vec = &mikasa_primo_mv;
958 			else
959 				vec = &mikasa_mv;
960 			break;
961 		case ST_DEC_NORITAKE:
962 			if (cpu == EV5_CPU || cpu == EV56_CPU)
963 				vec = &noritake_primo_mv;
964 			else
965 				vec = &noritake_mv;
966 			break;
967 		case ST_DEC_2100_A500:
968 			if (cpu == EV5_CPU || cpu == EV56_CPU)
969 				vec = &sable_gamma_mv;
970 			else
971 				vec = &sable_mv;
972 			break;
973 		}
974 	}
975 	return vec;
976 }
977 
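/* Look up a machine vector by name, for the "alpha_mv=" command line override. */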
978 static struct alpha_machine_vector * __init
979 get_sysvec_byname(const char *name)
980 {
981 	static struct alpha_machine_vector *all_vecs[] __initdata =
982 	{
983 		&alcor_mv,
984 		&alphabook1_mv,
985 		&avanti_mv,
986 		&cabriolet_mv,
987 		&clipper_mv,
988 		&dp264_mv,
989 		&eb164_mv,
990 		&eb64p_mv,
991 		&eb66_mv,
992 		&eb66p_mv,
993 		&eiger_mv,
994 		&jensen_mv,
995 		&lx164_mv,
996 		&lynx_mv,
997 		&miata_mv,
998 		&mikasa_mv,
999 		&mikasa_primo_mv,
1000 		&monet_mv,
1001 		&nautilus_mv,
1002 		&noname_mv,
1003 		&noritake_mv,
1004 		&noritake_primo_mv,
1005 		&p2k_mv,
1006 		&pc164_mv,
1007 		&privateer_mv,
1008 		&rawhide_mv,
1009 		&ruffian_mv,
1010 		&rx164_mv,
1011 		&sable_mv,
1012 		&sable_gamma_mv,
1013 		&shark_mv,
1014 		&sx164_mv,
1015 		&takara_mv,
1016 		&webbrick_mv,
1017 		&wildfire_mv,
1018 		&xl_mv,
1019 		&xlt_mv
1020 	};
1021 
1022 	size_t i;
1023 
1024 	for (i = 0; i < N(all_vecs); ++i) {
1025 		struct alpha_machine_vector *mv = all_vecs[i];
1026 		if (strcasecmp(mv->vector_name, name) == 0)
1027 			return mv;
1028 	}
1029 	return NULL;
1030 }
1031 
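/* Translate type/variation/cpu into printable system and variation names,
   used for the boot banner and /proc/cpuinfo. */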
1032 static void
1033 get_sysnames(unsigned long type, unsigned long variation, unsigned long cpu,
1034 	     char **type_name, char **variation_name)
1035 {
1036 	unsigned long member;
1037 
1038 	/* If not in the tables, make it UNKNOWN,
1039 	   else set type name to family */
1040 	if (type < N(systype_names)) {
1041 		*type_name = systype_names[type];
1042 	} else if ((type > ST_API_BIAS) &&
1043 		   (type - ST_API_BIAS) < N(api_names)) {
1044 		*type_name = api_names[type - ST_API_BIAS];
1045 	} else if ((type > ST_UNOFFICIAL_BIAS) &&
1046 		   (type - ST_UNOFFICIAL_BIAS) < N(unofficial_names)) {
1047 		*type_name = unofficial_names[type - ST_UNOFFICIAL_BIAS];
1048 	} else {
1049 		*type_name = sys_unknown;
1050 		*variation_name = sys_unknown;
1051 		return;
1052 	}
1053 
1054 	/* Set variation to "0"; if variation is zero, done.  */
1055 	*variation_name = systype_names[0];
1056 	if (variation == 0) {
1057 		return;
1058 	}
1059 
1060 	member = (variation >> 10) & 0x3f; /* member ID is a bit-field */
1061 
1062 	cpu &= 0xffffffff; /* make it usable */
1063 
1064 	switch (type) { /* select by family */
1065 	default: /* default to variation "0" for now */
1066 		break;
1067 	case ST_DEC_EB164:
1068 		if (member < N(eb164_indices))
1069 			*variation_name = eb164_names[eb164_indices[member]];
1070 		/* PC164 may show up as an EB164 variation with an EV56 CPU,
1071 		   but no true EB164 ever had anything but an EV5, so report
1072 		   it as a PC164.  */
1072 		if (eb164_indices[member] == 0 && cpu == EV56_CPU)
1073 			*variation_name = eb164_names[1]; /* make it PC164 */
1074 		break;
1075 	case ST_DEC_ALCOR:
1076 		if (member < N(alcor_indices))
1077 			*variation_name = alcor_names[alcor_indices[member]];
1078 		break;
1079 	case ST_DEC_EB64P:
1080 		if (member < N(eb64p_indices))
1081 			*variation_name = eb64p_names[eb64p_indices[member]];
1082 		break;
1083 	case ST_DEC_EB66:
1084 		if (member < N(eb66_indices))
1085 			*variation_name = eb66_names[eb66_indices[member]];
1086 		break;
1087 	case ST_DEC_MARVEL:
1088 		if (member < N(marvel_indices))
1089 			*variation_name = marvel_names[marvel_indices[member]];
1090 		break;
1091 	case ST_DEC_RAWHIDE:
1092 		if (member < N(rawhide_indices))
1093 			*variation_name = rawhide_names[rawhide_indices[member]];
1094 		break;
1095 	case ST_DEC_TITAN:
1096 		*variation_name = titan_names[0];	/* default */
1097 		if (member < N(titan_indices))
1098 			*variation_name = titan_names[titan_indices[member]];
1099 		break;
1100 	case ST_DEC_TSUNAMI:
1101 		if (member < N(tsunami_indices))
1102 			*variation_name = tsunami_names[tsunami_indices[member]];
1103 		break;
1104 	}
1105 }
1106 
1107 /*
1108  * A change was made to the HWRPB via an ECO and the following code
1109  * tracks a part of the ECO.  In HWRPB versions less than 5, the ECO
1110  * was not implemented in the console firmware.  If it's revision 5 or
1111  * greater we can get the name of the platform as an ASCII string from
1112  * the HWRPB.  That's what this function does.  It checks the revision
1113  * level and, if the string is present in the HWRPB, returns its
1114  * address -- a pointer to the name of the platform.
1115  *
1116  * Returns:
1117  *      - Pointer to an ASCII string if it's in the HWRPB
1118  *      - Pointer to a blank string if the data is not in the HWRPB.
1119  */
1120 
1121 static char *
1122 platform_string(void)
1123 {
1124 	struct dsr_struct *dsr;
1125 	static char unk_system_string[] = "N/A";
1126 
1127 	/* Go to the console for the string pointer.
1128 	 * If the HWRPB revision is below 5, the HWRPB
1129 	 * is old and does not have this data in it.
1130 	 */
1131 	if (hwrpb->revision < 5)
1132 		return (unk_system_string);
1133 	else {
1134 		/* The Dynamic System Recognition struct
1135 		 * has the system platform name starting
1136 		 * after the character count of the string.
1137 		 */
1138 		dsr =  ((struct dsr_struct *)
1139 			((char *)hwrpb + hwrpb->dsr_offset));
1140 		return ((char *)dsr + (dsr->sysname_off +
1141 				       sizeof(long)));
1142 	}
1143 }
1144 
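/* Count the per-CPU HWRPB slots whose flag bits match the 0x1cc mask;
   these are the processors reported as "cpus detected" in /proc/cpuinfo. */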
1145 static int
1146 get_nr_processors(struct percpu_struct *cpubase, unsigned long num)
1147 {
1148 	struct percpu_struct *cpu;
1149 	unsigned long i;
1150 	int count = 0;
1151 
1152 	for (i = 0; i < num; i++) {
1153 		cpu = (struct percpu_struct *)
1154 			((char *)cpubase + i*hwrpb->processor_size);
1155 		if ((cpu->flags & 0x1cc) == 0x1cc)
1156 			count++;
1157 	}
1158 	return count;
1159 }
1160 
1161 static void
1162 show_cache_size (struct seq_file *f, const char *which, int shape)
1163 {
1164 	if (shape == -1)
1165 		seq_printf (f, "%s\t\t: n/a\n", which);
1166 	else if (shape == 0)
1167 		seq_printf (f, "%s\t\t: unknown\n", which);
1168 	else
1169 		seq_printf (f, "%s\t\t: %dK, %d-way, %db line\n",
1170 			    which, shape >> 10, shape & 15,
1171 			    1 << ((shape >> 4) & 15));
1172 }
1173 
1174 static int
1175 show_cpuinfo(struct seq_file *f, void *slot)
1176 {
1177 	extern struct unaligned_stat {
1178 		unsigned long count, va, pc;
1179 	} unaligned[2];
1180 
1181 	static char cpu_names[][8] = {
1182 		"EV3", "EV4", "Simulate", "LCA4", "EV5", "EV45", "EV56",
1183 		"EV6", "PCA56", "PCA57", "EV67", "EV68CB", "EV68AL",
1184 		"EV68CX", "EV7", "EV79", "EV69"
1185 	};
1186 
1187 	struct percpu_struct *cpu = slot;
1188 	unsigned int cpu_index;
1189 	char *cpu_name;
1190 	char *systype_name;
1191 	char *sysvariation_name;
1192 	int nr_processors;
1193 
1194 	cpu_index = (unsigned) (cpu->type - 1);
1195 	cpu_name = "Unknown";
1196 	if (cpu_index < N(cpu_names))
1197 		cpu_name = cpu_names[cpu_index];
1198 
1199 	get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
1200 		     cpu->type, &systype_name, &sysvariation_name);
1201 
1202 	nr_processors = get_nr_processors(cpu, hwrpb->nr_processors);
1203 
1204 	seq_printf(f, "cpu\t\t\t: Alpha\n"
1205 		      "cpu model\t\t: %s\n"
1206 		      "cpu variation\t\t: %ld\n"
1207 		      "cpu revision\t\t: %ld\n"
1208 		      "cpu serial number\t: %s\n"
1209 		      "system type\t\t: %s\n"
1210 		      "system variation\t: %s\n"
1211 		      "system revision\t\t: %ld\n"
1212 		      "system serial number\t: %s\n"
1213 		      "cycle frequency [Hz]\t: %lu %s\n"
1214 		      "timer frequency [Hz]\t: %lu.%02lu\n"
1215 		      "page size [bytes]\t: %ld\n"
1216 		      "phys. address bits\t: %ld\n"
1217 		      "max. addr. space #\t: %ld\n"
1218 		      "BogoMIPS\t\t: %lu.%02lu\n"
1219 		      "kernel unaligned acc\t: %ld (pc=%lx,va=%lx)\n"
1220 		      "user unaligned acc\t: %ld (pc=%lx,va=%lx)\n"
1221 		      "platform string\t\t: %s\n"
1222 		      "cpus detected\t\t: %d\n",
1223 		       cpu_name, cpu->variation, cpu->revision,
1224 		       (char*)cpu->serial_no,
1225 		       systype_name, sysvariation_name, hwrpb->sys_revision,
1226 		       (char*)hwrpb->ssn,
1227 		       est_cycle_freq ? : hwrpb->cycle_freq,
1228 		       est_cycle_freq ? "est." : "",
1229 		       hwrpb->intr_freq / 4096,
1230 		       (100 * hwrpb->intr_freq / 4096) % 100,
1231 		       hwrpb->pagesize,
1232 		       hwrpb->pa_bits,
1233 		       hwrpb->max_asn,
1234 		       loops_per_jiffy / (500000/HZ),
1235 		       (loops_per_jiffy / (5000/HZ)) % 100,
1236 		       unaligned[0].count, unaligned[0].pc, unaligned[0].va,
1237 		       unaligned[1].count, unaligned[1].pc, unaligned[1].va,
1238 		       platform_string(), nr_processors);
1239 
1240 #ifdef CONFIG_SMP
1241 	seq_printf(f, "cpus active\t\t: %d\n"
1242 		      "cpu active mask\t\t: %016lx\n",
1243 		       num_online_cpus(), cpus_addr(cpu_possible_map)[0]);
1244 #endif
1245 
1246 	show_cache_size (f, "L1 Icache", alpha_l1i_cacheshape);
1247 	show_cache_size (f, "L1 Dcache", alpha_l1d_cacheshape);
1248 	show_cache_size (f, "L2 cache", alpha_l2_cacheshape);
1249 	show_cache_size (f, "L3 cache", alpha_l3_cacheshape);
1250 
1251 	return 0;
1252 }
1253 
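/* Time size/stride dependent loads with the cycle counter (rpcc) and return
   the average latency of a single load, in cycles.  Used by the external
   cache probe below. */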
1254 static int __init
1255 read_mem_block(int *addr, int stride, int size)
1256 {
1257 	long nloads = size / stride, cnt, tmp;
1258 
1259 	__asm__ __volatile__(
1260 	"	rpcc    %0\n"
1261 	"1:	ldl	%3,0(%2)\n"
1262 	"	subq	%1,1,%1\n"
1263 	/* Next two XORs introduce an explicit data dependency between
1264 	   consecutive loads in the loop, which will give us true load
1265 	   latency. */
1266 	"	xor	%3,%2,%2\n"
1267 	"	xor	%3,%2,%2\n"
1268 	"	addq	%2,%4,%2\n"
1269 	"	bne	%1,1b\n"
1270 	"	rpcc	%3\n"
1271 	"	subl	%3,%0,%0\n"
1272 	: "=&r" (cnt), "=&r" (nloads), "=&r" (addr), "=&r" (tmp)
1273 	: "r" (stride), "1" (nloads), "2" (addr));
1274 
1275 	return cnt / (size / stride);
1276 }
1277 
1278 #define CSHAPE(totalsize, linesize, assoc) \
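/* Pack a cache description into a single int: total size in bytes with the
   low 8 bits cleared, log2 of the line size in bits 4-7, and the associativity
   in bits 0-3.  show_cache_size() decodes this for /proc/cpuinfo. */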
1279   ((totalsize & ~0xff) | (linesize << 4) | assoc)
1280 
1281 /* ??? EV5 supports up to 64M, but did the systems with more than
1282    16M of BCACHE ever exist? */
1283 #define MAX_BCACHE_SIZE	16*1024*1024
1284 
1285 /* Note that the offchip caches are direct mapped on all Alphas. */
1286 static int __init
1287 external_cache_probe(int minsize, int width)
1288 {
1289 	int cycles, prev_cycles = 1000000;
1290 	int stride = 1 << width;
1291 	long size = minsize, maxsize = MAX_BCACHE_SIZE * 2;
1292 
1293 	if (maxsize > (max_low_pfn + 1) << PAGE_SHIFT)
1294 		maxsize = 1 << (floor_log2(max_low_pfn + 1) + PAGE_SHIFT);
1295 
1296 	/* Get the first block cached. */
1297 	read_mem_block(__va(0), stride, size);
1298 
1299 	while (size < maxsize) {
1300 		/* Get an average load latency in cycles. */
1301 		cycles = read_mem_block(__va(0), stride, size);
1302 		if (cycles > prev_cycles * 2) {
1303 			/* Fine, we exceed the cache. */
1304 			printk("%ldK Bcache detected; load hit latency %d "
1305 			       "cycles, load miss latency %d cycles\n",
1306 			       size >> 11, prev_cycles, cycles);
1307 			return CSHAPE(size >> 1, width, 1);
1308 		}
1309 		/* Try to get the next block cached. */
1310 		read_mem_block(__va(size), stride, size);
1311 		prev_cycles = cycles;
1312 		size <<= 1;
1313 	}
1314 	return -1;	/* No BCACHE found. */
1315 }
1316 
1317 static void __init
1318 determine_cpu_caches (unsigned int cpu_type)
1319 {
1320 	int L1I, L1D, L2, L3;
1321 
1322 	switch (cpu_type) {
1323 	case EV4_CPU:
1324 	case EV45_CPU:
1325 	  {
1326 		if (cpu_type == EV4_CPU)
1327 			L1I = CSHAPE(8*1024, 5, 1);
1328 		else
1329 			L1I = CSHAPE(16*1024, 5, 1);
1330 		L1D = L1I;
1331 		L3 = -1;
1332 
1333 		/* BIU_CTL is a write-only Abox register.  PALcode has a
1334 		   shadow copy, and may be available from some versions
1335 		   of the CSERVE PALcall.  If we can get it, then
1336 
1337 			unsigned long biu_ctl, size;
1338 			size = 128*1024 * (1 << ((biu_ctl >> 28) & 7));
1339 			L2 = CSHAPE (size, 5, 1);
1340 
1341 		   Unfortunately, we can't rely on that.
1342 		*/
1343 		L2 = external_cache_probe(128*1024, 5);
1344 		break;
1345 	  }
1346 
1347 	case LCA4_CPU:
1348 	  {
1349 		unsigned long car, size;
1350 
1351 		L1I = L1D = CSHAPE(8*1024, 5, 1);
1352 		L3 = -1;
1353 
1354 		car = *(vuip) phys_to_virt (0x120000078UL);
1355 		size = 64*1024 * (1 << ((car >> 5) & 7));
1356 		/* No typo -- 8 byte cacheline size.  Whodathunk.  */
1357 		L2 = (car & 1 ? CSHAPE (size, 3, 1) : -1);
1358 		break;
1359 	  }
1360 
1361 	case EV5_CPU:
1362 	case EV56_CPU:
1363 	  {
1364 		unsigned long sc_ctl, width;
1365 
1366 		L1I = L1D = CSHAPE(8*1024, 5, 1);
1367 
1368 		/* Check the line size of the Scache.  */
1369 		sc_ctl = *(vulp) phys_to_virt (0xfffff000a8UL);
1370 		width = sc_ctl & 0x1000 ? 6 : 5;
1371 		L2 = CSHAPE (96*1024, width, 3);
1372 
1373 		/* BC_CONTROL and BC_CONFIG are write-only IPRs.  PALcode
1374 		   has a shadow copy, and may be available from some versions
1375 		   of the CSERVE PALcall.  If we can get it, then
1376 
1377 			unsigned long bc_control, bc_config, size;
1378 			size = 1024*1024 * (1 << ((bc_config & 7) - 1));
1379 			L3 = (bc_control & 1 ? CSHAPE (size, width, 1) : -1);
1380 
1381 		   Unfortunately, we can't rely on that.
1382 		*/
1383 		L3 = external_cache_probe(1024*1024, width);
1384 		break;
1385 	  }
1386 
1387 	case PCA56_CPU:
1388 	case PCA57_CPU:
1389 	  {
1390 		unsigned long cbox_config, size;
1391 
1392 		if (cpu_type == PCA56_CPU) {
1393 			L1I = CSHAPE(16*1024, 6, 1);
1394 			L1D = CSHAPE(8*1024, 5, 1);
1395 		} else {
1396 			L1I = CSHAPE(32*1024, 6, 2);
1397 			L1D = CSHAPE(16*1024, 5, 1);
1398 		}
1399 		L3 = -1;
1400 
1401 		cbox_config = *(vulp) phys_to_virt (0xfffff00008UL);
1402 		size = 512*1024 * (1 << ((cbox_config >> 12) & 3));
1403 
1404 #if 0
1405 		L2 = ((cbox_config >> 31) & 1 ? CSHAPE (size, 6, 1) : -1);
1406 #else
1407 		L2 = external_cache_probe(512*1024, 6);
1408 #endif
1409 		break;
1410 	  }
1411 
1412 	case EV6_CPU:
1413 	case EV67_CPU:
1414 	case EV68CB_CPU:
1415 	case EV68AL_CPU:
1416 	case EV68CX_CPU:
1417 	case EV69_CPU:
1418 		L1I = L1D = CSHAPE(64*1024, 6, 2);
1419 		L2 = external_cache_probe(1024*1024, 6);
1420 		L3 = -1;
1421 		break;
1422 
1423 	case EV7_CPU:
1424 	case EV79_CPU:
1425 		L1I = L1D = CSHAPE(64*1024, 6, 2);
1426 		L2 = CSHAPE(7*1024*1024/4, 6, 7);
1427 		L3 = -1;
1428 		break;
1429 
1430 	default:
1431 		/* Nothing known about this cpu type.  */
1432 		L1I = L1D = L2 = L3 = 0;
1433 		break;
1434 	}
1435 
1436 	alpha_l1i_cacheshape = L1I;
1437 	alpha_l1d_cacheshape = L1D;
1438 	alpha_l2_cacheshape = L2;
1439 	alpha_l3_cacheshape = L3;
1440 }
1441 
1442 /*
1443  * We show only CPU #0 info.
1444  */
1445 static void *
1446 c_start(struct seq_file *f, loff_t *pos)
1447 {
1448 	return *pos ? NULL : (char *)hwrpb + hwrpb->processor_offset;
1449 }
1450 
1451 static void *
1452 c_next(struct seq_file *f, void *v, loff_t *pos)
1453 {
1454 	return NULL;
1455 }
1456 
1457 static void
1458 c_stop(struct seq_file *f, void *v)
1459 {
1460 }
1461 
1462 struct seq_operations cpuinfo_op = {
1463 	.start	= c_start,
1464 	.next	= c_next,
1465 	.stop	= c_stop,
1466 	.show	= show_cpuinfo,
1467 };
1468 
1469 
1470 static int
1471 alpha_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
1472 {
1473 #if 1
1474 	/* FIXME FIXME FIXME */
1475 	/* If we are using SRM and serial console, just hard halt here. */
1476 	if (alpha_using_srm && srmcons_output)
1477 		__halt();
1478 #endif
1479         return NOTIFY_DONE;
1480 }
1481