// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/alpha/kernel/setup.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

/* 2.3.x bootmem, 1999 Andrea Arcangeli <andrea@suse.de> */

/*
 * Bootup setup stuff.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/screen_info.h>
#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/panic_notifier.h>
#include <linux/platform_device.h>
#include <linux/memblock.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/initrd.h>
#include <linux/eisa.h>
#include <linux/pfn.h>
#ifdef CONFIG_MAGIC_SYSRQ
#include <linux/sysrq.h>
#include <linux/reboot.h>
#endif
#include <linux/notifier.h>
#include <asm/setup.h>
#include <asm/io.h>
#include <linux/log2.h>
#include <linux/export.h>

static int alpha_panic_event(struct notifier_block *, unsigned long, void *);
static struct notifier_block alpha_panic_block = {
	alpha_panic_event,
	NULL,
	INT_MAX /* try to do it first */
};

#include <linux/uaccess.h>
#include <asm/hwrpb.h>
#include <asm/dma.h>
#include <asm/mmu_context.h>
#include <asm/console.h>

#include "proto.h"
#include "pci_impl.h"


struct hwrpb_struct *hwrpb;
EXPORT_SYMBOL(hwrpb);
unsigned long srm_hae;

int alpha_l1i_cacheshape;
int alpha_l1d_cacheshape;
int alpha_l2_cacheshape;
int alpha_l3_cacheshape;

#ifdef CONFIG_VERBOSE_MCHECK
/* 0=minimum, 1=verbose, 2=all */
/* These can be overridden via the command line, ie "verbose_mcheck=2" */
unsigned long alpha_verbose_mcheck = CONFIG_VERBOSE_MCHECK_ON;
#endif

/* Which processor we booted from.  */
int boot_cpuid;

/*
 * Using SRM callbacks for initial console output. This works from
 * setup_arch() time through the end of time_init(), as those places
 * are under our (Alpha) control.
 *
 * "srmcons" specified in the boot command arguments allows us to
 * see kernel messages during the period of time before the true
 * console device is "registered" during console_init().
 * As of this version (2.5.59), console_init() will call
 * disable_early_printk() as the last action before initializing
 * the console drivers. That's the last possible time srmcons can be
 * unregistered without interfering with console behavior.
 *
 * By default, OFF; set it with a bootcommand arg of "srmcons" or
 * "console=srm". The meaning of these two args is:
 *     "srmcons"     - early callback prints
 *     "console=srm" - full callback based console, including early prints
 */
int srmcons_output = 0;

/* Enforce a memory size limit; useful for testing. By default, none. */
unsigned long mem_size_limit = 0;

/* Set AGP GART window size (0 means disabled). */
unsigned long alpha_agpgart_size = DEFAULT_AGP_APER_SIZE;
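
/*
 * A note on units: mem_size_limit is a page frame count (it holds what
 * get_mem_size_limit() returns), while alpha_agpgart_size is in bytes.
 * Both can be overridden from the boot command line ("mem=" and
 * "gartsize="); see the argument parsing loop in setup_arch() below.
 * For example, "mem=512M" ends up as
 * mem_size_limit == (512 << 20) >> PAGE_SHIFT.
 */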

#ifdef CONFIG_ALPHA_GENERIC
struct alpha_machine_vector alpha_mv;
EXPORT_SYMBOL(alpha_mv);
#endif

#ifndef alpha_using_srm
int alpha_using_srm;
EXPORT_SYMBOL(alpha_using_srm);
#endif

#ifndef alpha_using_qemu
int alpha_using_qemu;
#endif

static struct alpha_machine_vector *get_sysvec(unsigned long, unsigned long,
					       unsigned long);
static struct alpha_machine_vector *get_sysvec_byname(const char *);
static void get_sysnames(unsigned long, unsigned long, unsigned long,
			 char **, char **);
static void determine_cpu_caches (unsigned int);

static char __initdata command_line[COMMAND_LINE_SIZE];

#ifdef CONFIG_VGA_CONSOLE
/*
 * The format of "screen_info" is strange, and due to early
 * i386-setup code. This is just enough to make the console
 * code think we're on a VGA color display.
 */

struct screen_info vgacon_screen_info = {
	.orig_x = 0,
	.orig_y = 25,
	.orig_video_cols = 80,
	.orig_video_lines = 25,
	.orig_video_isVGA = 1,
	.orig_video_points = 16
};
#endif

/*
 * The direct map I/O window, if any.  This should be the same
 * for all busses, since it's used by virt_to_bus.
 */

unsigned long __direct_map_base;
unsigned long __direct_map_size;
EXPORT_SYMBOL(__direct_map_base);
EXPORT_SYMBOL(__direct_map_size);

/*
 * Declare all of the machine vectors.
 */

/* GCC 2.7.2 (on alpha at least) is lame.  It does not support either
   __attribute__((weak)) or #pragma weak.  Bypass it and talk directly
   to the assembler.  */

#define WEAK(X) \
	extern struct alpha_machine_vector X; \
	asm(".weak "#X)

WEAK(alcor_mv);
WEAK(alphabook1_mv);
WEAK(avanti_mv);
WEAK(cabriolet_mv);
WEAK(clipper_mv);
WEAK(dp264_mv);
WEAK(eb164_mv);
WEAK(eb64p_mv);
WEAK(eb66_mv);
WEAK(eb66p_mv);
WEAK(eiger_mv);
WEAK(lx164_mv);
WEAK(lynx_mv);
WEAK(marvel_ev7_mv);
WEAK(miata_mv);
WEAK(mikasa_mv);
WEAK(mikasa_primo_mv);
WEAK(monet_mv);
WEAK(nautilus_mv);
WEAK(noname_mv);
WEAK(noritake_mv);
WEAK(noritake_primo_mv);
WEAK(p2k_mv);
WEAK(pc164_mv);
WEAK(privateer_mv);
WEAK(rawhide_mv);
WEAK(ruffian_mv);
WEAK(rx164_mv);
WEAK(sable_mv);
WEAK(sable_gamma_mv);
WEAK(shark_mv);
WEAK(sx164_mv);
WEAK(takara_mv);
WEAK(titan_mv);
WEAK(webbrick_mv);
WEAK(wildfire_mv);
WEAK(xl_mv);
WEAK(xlt_mv);

#undef WEAK

/*
 * I/O resources inherited from PeeCees.  Except for perhaps the
 * turbochannel alphas, everyone has these on some sort of SuperIO chip.
 *
 * ??? If this becomes less standard, move the struct out into the
 * machine vector.
 */

static void __init
reserve_std_resources(void)
{
	static struct resource standard_io_resources[] = {
		{ .name = "rtc", .start = 0x70, .end = 0x7f},
		{ .name = "dma1", .start = 0x00, .end = 0x1f },
		{ .name = "pic1", .start = 0x20, .end = 0x3f },
		{ .name = "timer", .start = 0x40, .end = 0x5f },
		{ .name = "keyboard", .start = 0x60, .end = 0x6f },
		{ .name = "dma page reg", .start = 0x80, .end = 0x8f },
		{ .name = "pic2", .start = 0xa0, .end = 0xbf },
		{ .name = "dma2", .start = 0xc0, .end = 0xdf },
	};

	struct resource *io = &ioport_resource;
	size_t i;

	if (hose_head) {
		struct pci_controller *hose;
		for (hose = hose_head; hose; hose = hose->next)
			if (hose->index == 0) {
				io = hose->io_space;
				break;
			}
	}

	for (i = 0; i < ARRAY_SIZE(standard_io_resources); ++i)
		request_resource(io, standard_io_resources+i);
}

#define PFN_MAX		PFN_DOWN(0x80000000)
#define for_each_mem_cluster(memdesc, _cluster, i)		\
	for ((_cluster) = (memdesc)->cluster, (i) = 0;		\
	     (i) < (memdesc)->numclusters; (i)++, (_cluster)++)

static unsigned long __init
get_mem_size_limit(char *s)
{
	unsigned long end = 0;
	char *from = s;

	end = simple_strtoul(from, &from, 0);
	if ( *from == 'K' || *from == 'k' ) {
		end = end << 10;
		from++;
	} else if ( *from == 'M' || *from == 'm' ) {
		end = end << 20;
		from++;
	} else if ( *from == 'G' || *from == 'g' ) {
		end = end << 30;
		from++;
	}
	return end >> PAGE_SHIFT;	/* Return the PFN of the limit. */
}

#ifdef CONFIG_BLK_DEV_INITRD
void * __init
move_initrd(unsigned long mem_limit)
{
	void *start;
	unsigned long size;

	size = initrd_end - initrd_start;
	start = memblock_alloc(PAGE_ALIGN(size), PAGE_SIZE);
	if (!start || __pa(start) + size > mem_limit) {
		initrd_start = initrd_end = 0;
		return NULL;
	}
	memmove(start, (void *)initrd_start, size);
	initrd_start = (unsigned long)start;
	initrd_end = initrd_start + size;
	printk("initrd moved to %p\n", start);
	return start;
}
#endif

static void __init
setup_memory(void *kernel_end)
{
	struct memclust_struct * cluster;
	struct memdesc_struct * memdesc;
	unsigned long kernel_size;
	unsigned long i;

	/* Find free clusters, and init and free the bootmem accordingly.  */
	memdesc = (struct memdesc_struct *)
	  (hwrpb->mddt_offset + (unsigned long) hwrpb);

	for_each_mem_cluster(memdesc, cluster, i) {
		unsigned long end;

		printk("memcluster %lu, usage %01lx, start %8lu, end %8lu\n",
		       i, cluster->usage, cluster->start_pfn,
		       cluster->start_pfn + cluster->numpages);

		end = cluster->start_pfn + cluster->numpages;
		if (end > max_low_pfn)
			max_low_pfn = end;

		memblock_add(PFN_PHYS(cluster->start_pfn),
			     cluster->numpages << PAGE_SHIFT);

		/* Bit 0 is console/PALcode reserved.  Bit 1 is
		   non-volatile memory -- we might want to mark
		   this for later.  */
		if (cluster->usage & 3)
			memblock_reserve(PFN_PHYS(cluster->start_pfn),
					 cluster->numpages << PAGE_SHIFT);
	}

	/*
	 * Except for the NUMA systems (wildfire, marvel) all of the
	 * Alpha systems we run on support 32GB of memory or less.
	 * Since the NUMA systems introduce large holes in memory addressing,
	 * we can get into a situation where there is not enough contiguous
	 * memory for the memory map.
	 *
	 * Limit memory to the first 32GB to limit the NUMA systems to
	 * memory on their first node (wildfire) or 2 (marvel) to avoid
	 * not being able to produce the memory map. In order to access
	 * all of the memory on the NUMA systems, build with discontiguous
	 * memory support.
	 *
	 * If the user specified a memory limit, let that memory limit stand.
	 */
	if (!mem_size_limit)
		mem_size_limit = (32ul * 1024 * 1024 * 1024) >> PAGE_SHIFT;

	if (mem_size_limit && max_low_pfn >= mem_size_limit)
	{
		printk("setup: forcing memory size to %ldK (from %ldK).\n",
		       mem_size_limit << (PAGE_SHIFT - 10),
		       max_low_pfn    << (PAGE_SHIFT - 10));
		max_low_pfn = mem_size_limit;
	}

	/* Reserve the kernel memory. */
	kernel_size = virt_to_phys(kernel_end) - KERNEL_START_PHYS;
	memblock_reserve(KERNEL_START_PHYS, kernel_size);

#ifdef CONFIG_BLK_DEV_INITRD
	initrd_start = INITRD_START;
	if (initrd_start) {
		initrd_end = initrd_start+INITRD_SIZE;
		printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
		       (void *) initrd_start, INITRD_SIZE);

		if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) {
			if (!move_initrd(PFN_PHYS(max_low_pfn)))
				printk("initrd extends beyond end of memory "
				       "(0x%08lx > 0x%p)\ndisabling initrd\n",
				       initrd_end,
				       phys_to_virt(PFN_PHYS(max_low_pfn)));
		} else {
			memblock_reserve(virt_to_phys((void *)initrd_start),
					 INITRD_SIZE);
		}
	}
#endif /* CONFIG_BLK_DEV_INITRD */
}

int page_is_ram(unsigned long pfn)
{
	struct memclust_struct * cluster;
	struct memdesc_struct * memdesc;
	unsigned long i;

	memdesc = (struct memdesc_struct *)
		(hwrpb->mddt_offset + (unsigned long) hwrpb);
	for_each_mem_cluster(memdesc, cluster, i)
	{
		if (pfn >= cluster->start_pfn  &&
		    pfn < cluster->start_pfn + cluster->numpages) {
			return (cluster->usage & 3) ? 0 : 1;
		}
	}

	return 0;
}

static int __init
register_cpus(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return -ENOMEM;
		register_cpu(p, i);
	}
	return 0;
}

arch_initcall(register_cpus);

#ifdef CONFIG_MAGIC_SYSRQ
static void sysrq_reboot_handler(u8 unused)
{
	machine_halt();
}

static const struct sysrq_key_op srm_sysrq_reboot_op = {
	.handler	= sysrq_reboot_handler,
	.help_msg	= "reboot(b)",
	.action_msg	= "Resetting",
	.enable_mask	= SYSRQ_ENABLE_BOOT,
};
#endif

void __init
setup_arch(char **cmdline_p)
{
	extern char _end[];

	struct alpha_machine_vector *vec = NULL;
	struct percpu_struct *cpu;
	char *type_name, *var_name, *p;
	void *kernel_end = _end; /* end of kernel */
	char *args = command_line;

	hwrpb = (struct hwrpb_struct*) __va(INIT_HWRPB->phys_addr);
	boot_cpuid = hard_smp_processor_id();

	/*
	 * Pre-process the system type to make sure it will be valid.
	 *
	 * This may restore real CABRIO and EB66+ family names, ie
	 * EB64+ and EB66.
	 *
	 * Oh, and "white box" AS800 (aka DIGITAL Server 3000 series)
	 * and AS1200 (DIGITAL Server 5000 series) have the type as
	 * the negative of the real one.
	 */
	if ((long)hwrpb->sys_type < 0) {
		hwrpb->sys_type = -((long)hwrpb->sys_type);
		hwrpb_update_checksum(hwrpb);
	}

	/* Register a call for panic conditions. */
	atomic_notifier_chain_register(&panic_notifier_list,
				       &alpha_panic_block);

#ifndef alpha_using_srm
	/* Assume that we've booted from SRM if we haven't booted from MILO.
	   Detect the latter by looking for "MILO" in the system serial nr.  */
	alpha_using_srm = !str_has_prefix((const char *)hwrpb->ssn, "MILO");
#endif
#ifndef alpha_using_qemu
	/* Similarly, look for QEMU.  */
	alpha_using_qemu = strstr((const char *)hwrpb->ssn, "QEMU") != 0;
#endif

	/* If we are using SRM, we want to allow callbacks
	   as early as possible, so do this NOW, and then
	   they should work immediately thereafter.
	*/
	kernel_end = callback_init(kernel_end);

	/*
	 * Locate the command line.
	 */
	strscpy(command_line, COMMAND_LINE, sizeof(command_line));
	strcpy(boot_command_line, command_line);
	*cmdline_p = command_line;

	/*
	 * Process command-line arguments.
	 */
	while ((p = strsep(&args, " \t")) != NULL) {
		if (!*p) continue;
		if (strncmp(p, "alpha_mv=", 9) == 0) {
			vec = get_sysvec_byname(p+9);
			continue;
		}
		if (strncmp(p, "cycle=", 6) == 0) {
			est_cycle_freq = simple_strtol(p+6, NULL, 0);
			continue;
		}
		if (strncmp(p, "mem=", 4) == 0) {
			mem_size_limit = get_mem_size_limit(p+4);
			continue;
		}
		if (strncmp(p, "srmcons", 7) == 0) {
			srmcons_output |= 1;
			continue;
		}
		if (strncmp(p, "console=srm", 11) == 0) {
			srmcons_output |= 2;
			continue;
		}
		if (strncmp(p, "gartsize=", 9) == 0) {
			alpha_agpgart_size =
				get_mem_size_limit(p+9) << PAGE_SHIFT;
			continue;
		}
#ifdef CONFIG_VERBOSE_MCHECK
		if (strncmp(p, "verbose_mcheck=", 15) == 0) {
			alpha_verbose_mcheck = simple_strtol(p+15, NULL, 0);
			continue;
		}
#endif
	}

	/* Replace the command line, now that we've killed it with strsep.  */
	strcpy(command_line, boot_command_line);

	/* If we want SRM console printk echoing early, do it now.  */
	if (alpha_using_srm && srmcons_output) {
		register_srm_console();

		/*
		 * If "console=srm" was specified, clear the srmcons_output
		 * flag now so that time.c won't unregister_srm_console
		 */
		if (srmcons_output & 2)
			srmcons_output = 0;
	}

#ifdef CONFIG_MAGIC_SYSRQ
	/* If we're using SRM, make sysrq-b halt back to the prom,
	   not auto-reboot.  */
	if (alpha_using_srm) {
		unregister_sysrq_key('b', __sysrq_reboot_op);
		register_sysrq_key('b', &srm_sysrq_reboot_op);
	}
#endif

	/*
	 * Identify and reconfigure for the current system.
	 */
	cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);

	get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
		     cpu->type, &type_name, &var_name);
	if (*var_name == '0')
		var_name = "";

	if (!vec) {
		vec = get_sysvec(hwrpb->sys_type, hwrpb->sys_variation,
				 cpu->type);
	}

	if (!vec) {
		panic("Unsupported system type: %s%s%s (%ld %ld)\n",
		      type_name, (*var_name ? " variation " : ""), var_name,
		      hwrpb->sys_type, hwrpb->sys_variation);
	}
	if (vec != &alpha_mv) {
		alpha_mv = *vec;
	}

	printk("Booting "
#ifdef CONFIG_ALPHA_GENERIC
	       "GENERIC "
#endif
	       "on %s%s%s using machine vector %s from %s\n",
	       type_name, (*var_name ? " variation " : ""),
	       var_name, alpha_mv.vector_name,
	       (alpha_using_srm ? "SRM" : "MILO"));
"SRM" : "MILO")); 582 583 printk("Major Options: " 584 #ifdef CONFIG_SMP 585 "SMP " 586 #endif 587 #ifdef CONFIG_ALPHA_EV56 588 "EV56 " 589 #endif 590 #ifdef CONFIG_ALPHA_EV67 591 "EV67 " 592 #endif 593 #ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS 594 "LEGACY_START " 595 #endif 596 #ifdef CONFIG_VERBOSE_MCHECK 597 "VERBOSE_MCHECK " 598 #endif 599 600 #ifdef CONFIG_DEBUG_SPINLOCK 601 "DEBUG_SPINLOCK " 602 #endif 603 #ifdef CONFIG_MAGIC_SYSRQ 604 "MAGIC_SYSRQ " 605 #endif 606 "\n"); 607 608 printk("Command line: %s\n", command_line); 609 610 /* 611 * Sync up the HAE. 612 * Save the SRM's current value for restoration. 613 */ 614 srm_hae = *alpha_mv.hae_register; 615 __set_hae(alpha_mv.hae_cache); 616 617 /* Reset enable correctable error reports. */ 618 wrmces(0x7); 619 620 /* Find our memory. */ 621 setup_memory(kernel_end); 622 memblock_set_bottom_up(true); 623 sparse_init(); 624 625 /* First guess at cpu cache sizes. Do this before init_arch. */ 626 determine_cpu_caches(cpu->type); 627 628 /* Initialize the machine. Usually has to do with setting up 629 DMA windows and the like. */ 630 if (alpha_mv.init_arch) 631 alpha_mv.init_arch(); 632 633 /* Reserve standard resources. */ 634 reserve_std_resources(); 635 636 /* 637 * Give us a default console. TGA users will see nothing until 638 * chr_dev_init is called, rather late in the boot sequence. 639 */ 640 641 #ifdef CONFIG_VT 642 #if defined(CONFIG_VGA_CONSOLE) 643 vgacon_register_screen(&vgacon_screen_info); 644 #endif 645 #endif 646 647 /* Default root filesystem to sda2. */ 648 ROOT_DEV = MKDEV(SCSI_DISK0_MAJOR, 2); 649 650 #ifdef CONFIG_EISA 651 /* FIXME: only set this when we actually have EISA in this box? */ 652 EISA_bus = 1; 653 #endif 654 655 /* 656 * Check ASN in HWRPB for validity, report if bad. 657 * FIXME: how was this failing? Should we trust it instead, 658 * and copy the value into alpha_mv.max_asn? 659 */ 660 661 if (hwrpb->max_asn != MAX_ASN) { 662 printk("Max ASN from HWRPB is bad (0x%lx)\n", hwrpb->max_asn); 663 } 664 665 /* 666 * Identify the flock of penguins. 
667 */ 668 669 #ifdef CONFIG_SMP 670 setup_smp(); 671 #endif 672 paging_init(); 673 } 674 675 static char sys_unknown[] = "Unknown"; 676 static char systype_names[][16] = { 677 "0", 678 "ADU", "Cobra", "Ruby", "Flamingo", "Mannequin", "Jensen", 679 "Pelican", "Morgan", "Sable", "Medulla", "Noname", 680 "Turbolaser", "Avanti", "Mustang", "Alcor", "Tradewind", 681 "Mikasa", "EB64", "EB66", "EB64+", "AlphaBook1", 682 "Rawhide", "K2", "Lynx", "XL", "EB164", "Noritake", 683 "Cortex", "29", "Miata", "XXM", "Takara", "Yukon", 684 "Tsunami", "Wildfire", "CUSCO", "Eiger", "Titan", "Marvel" 685 }; 686 687 static char unofficial_names[][8] = {"100", "Ruffian"}; 688 689 static char api_names[][16] = {"200", "Nautilus"}; 690 691 static char eb164_names[][8] = {"EB164", "PC164", "LX164", "SX164", "RX164"}; 692 static int eb164_indices[] = {0,0,0,1,1,1,1,1,2,2,2,2,3,3,3,3,4}; 693 694 static char alcor_names[][16] = {"Alcor", "Maverick", "Bret"}; 695 static int alcor_indices[] = {0,0,0,1,1,1,0,0,0,0,0,0,2,2,2,2,2,2}; 696 697 static char eb64p_names[][16] = {"EB64+", "Cabriolet", "AlphaPCI64"}; 698 static int eb64p_indices[] = {0,0,1,2}; 699 700 static char eb66_names[][8] = {"EB66", "EB66+"}; 701 static int eb66_indices[] = {0,0,1}; 702 703 static char marvel_names[][16] = { 704 "Marvel/EV7" 705 }; 706 static int marvel_indices[] = { 0 }; 707 708 static char rawhide_names[][16] = { 709 "Dodge", "Wrangler", "Durango", "Tincup", "DaVinci" 710 }; 711 static int rawhide_indices[] = {0,0,0,1,1,2,2,3,3,4,4}; 712 713 static char titan_names[][16] = { 714 "DEFAULT", "Privateer", "Falcon", "Granite" 715 }; 716 static int titan_indices[] = {0,1,2,2,3}; 717 718 static char tsunami_names[][16] = { 719 "0", "DP264", "Warhol", "Windjammer", "Monet", "Clipper", 720 "Goldrush", "Webbrick", "Catamaran", "Brisbane", "Melbourne", 721 "Flying Clipper", "Shark" 722 }; 723 static int tsunami_indices[] = {0,1,2,3,4,5,6,7,8,9,10,11,12}; 724 725 static struct alpha_machine_vector * __init 726 get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu) 727 { 728 static struct alpha_machine_vector *systype_vecs[] __initdata = 729 { 730 NULL, /* 0 */ 731 NULL, /* ADU */ 732 NULL, /* Cobra */ 733 NULL, /* Ruby */ 734 NULL, /* Flamingo */ 735 NULL, /* Mannequin */ 736 NULL, /* Jensens */ 737 NULL, /* Pelican */ 738 NULL, /* Morgan */ 739 NULL, /* Sable -- see below. */ 740 NULL, /* Medulla */ 741 &noname_mv, 742 NULL, /* Turbolaser */ 743 &avanti_mv, 744 NULL, /* Mustang */ 745 NULL, /* Alcor, Bret, Maverick. HWRPB inaccurate? */ 746 NULL, /* Tradewind */ 747 NULL, /* Mikasa -- see below. */ 748 NULL, /* EB64 */ 749 NULL, /* EB66 -- see variation. */ 750 NULL, /* EB64+ -- see variation. */ 751 &alphabook1_mv, 752 &rawhide_mv, 753 NULL, /* K2 */ 754 &lynx_mv, /* Lynx */ 755 &xl_mv, 756 NULL, /* EB164 -- see variation. */ 757 NULL, /* Noritake -- see below. */ 758 NULL, /* Cortex */ 759 NULL, /* 29 */ 760 &miata_mv, 761 NULL, /* XXM */ 762 &takara_mv, 763 NULL, /* Yukon */ 764 NULL, /* Tsunami -- see variation. 
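
/*
 * The *_indices tables above map the "member" field of the system
 * variation -- extracted in get_sysvec() and get_sysnames() below as
 * (sys_variation >> 10) & 0x3f -- onto an index into the matching
 * *_names/*_vecs arrays, since several hardware members share one
 * name and machine vector.
 */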
*/ 765 &wildfire_mv, /* Wildfire */ 766 NULL, /* CUSCO */ 767 &eiger_mv, /* Eiger */ 768 NULL, /* Titan */ 769 NULL, /* Marvel */ 770 }; 771 772 static struct alpha_machine_vector *unofficial_vecs[] __initdata = 773 { 774 NULL, /* 100 */ 775 &ruffian_mv, 776 }; 777 778 static struct alpha_machine_vector *api_vecs[] __initdata = 779 { 780 NULL, /* 200 */ 781 &nautilus_mv, 782 }; 783 784 static struct alpha_machine_vector *alcor_vecs[] __initdata = 785 { 786 &alcor_mv, &xlt_mv, &xlt_mv 787 }; 788 789 static struct alpha_machine_vector *eb164_vecs[] __initdata = 790 { 791 &eb164_mv, &pc164_mv, &lx164_mv, &sx164_mv, &rx164_mv 792 }; 793 794 static struct alpha_machine_vector *eb64p_vecs[] __initdata = 795 { 796 &eb64p_mv, 797 &cabriolet_mv, 798 &cabriolet_mv /* AlphaPCI64 */ 799 }; 800 801 static struct alpha_machine_vector *eb66_vecs[] __initdata = 802 { 803 &eb66_mv, 804 &eb66p_mv 805 }; 806 807 static struct alpha_machine_vector *marvel_vecs[] __initdata = 808 { 809 &marvel_ev7_mv, 810 }; 811 812 static struct alpha_machine_vector *titan_vecs[] __initdata = 813 { 814 &titan_mv, /* default */ 815 &privateer_mv, /* privateer */ 816 &titan_mv, /* falcon */ 817 &privateer_mv, /* granite */ 818 }; 819 820 static struct alpha_machine_vector *tsunami_vecs[] __initdata = 821 { 822 NULL, 823 &dp264_mv, /* dp264 */ 824 &dp264_mv, /* warhol */ 825 &dp264_mv, /* windjammer */ 826 &monet_mv, /* monet */ 827 &clipper_mv, /* clipper */ 828 &dp264_mv, /* goldrush */ 829 &webbrick_mv, /* webbrick */ 830 &dp264_mv, /* catamaran */ 831 NULL, /* brisbane? */ 832 NULL, /* melbourne? */ 833 NULL, /* flying clipper? */ 834 &shark_mv, /* shark */ 835 }; 836 837 /* ??? Do we need to distinguish between Rawhides? */ 838 839 struct alpha_machine_vector *vec; 840 841 /* Search the system tables first... */ 842 vec = NULL; 843 if (type < ARRAY_SIZE(systype_vecs)) { 844 vec = systype_vecs[type]; 845 } else if ((type > ST_API_BIAS) && 846 (type - ST_API_BIAS) < ARRAY_SIZE(api_vecs)) { 847 vec = api_vecs[type - ST_API_BIAS]; 848 } else if ((type > ST_UNOFFICIAL_BIAS) && 849 (type - ST_UNOFFICIAL_BIAS) < ARRAY_SIZE(unofficial_vecs)) { 850 vec = unofficial_vecs[type - ST_UNOFFICIAL_BIAS]; 851 } 852 853 /* If we've not found one, try for a variation. */ 854 855 if (!vec) { 856 /* Member ID is a bit-field. */ 857 unsigned long member = (variation >> 10) & 0x3f; 858 859 cpu &= 0xffffffff; /* make it usable */ 860 861 switch (type) { 862 case ST_DEC_ALCOR: 863 if (member < ARRAY_SIZE(alcor_indices)) 864 vec = alcor_vecs[alcor_indices[member]]; 865 break; 866 case ST_DEC_EB164: 867 if (member < ARRAY_SIZE(eb164_indices)) 868 vec = eb164_vecs[eb164_indices[member]]; 869 /* PC164 may show as EB164 variation with EV56 CPU, 870 but, since no true EB164 had anything but EV5... 
*/ 871 if (vec == &eb164_mv && cpu == EV56_CPU) 872 vec = &pc164_mv; 873 break; 874 case ST_DEC_EB64P: 875 if (member < ARRAY_SIZE(eb64p_indices)) 876 vec = eb64p_vecs[eb64p_indices[member]]; 877 break; 878 case ST_DEC_EB66: 879 if (member < ARRAY_SIZE(eb66_indices)) 880 vec = eb66_vecs[eb66_indices[member]]; 881 break; 882 case ST_DEC_MARVEL: 883 if (member < ARRAY_SIZE(marvel_indices)) 884 vec = marvel_vecs[marvel_indices[member]]; 885 break; 886 case ST_DEC_TITAN: 887 vec = titan_vecs[0]; /* default */ 888 if (member < ARRAY_SIZE(titan_indices)) 889 vec = titan_vecs[titan_indices[member]]; 890 break; 891 case ST_DEC_TSUNAMI: 892 if (member < ARRAY_SIZE(tsunami_indices)) 893 vec = tsunami_vecs[tsunami_indices[member]]; 894 break; 895 case ST_DEC_1000: 896 if (cpu == EV5_CPU || cpu == EV56_CPU) 897 vec = &mikasa_primo_mv; 898 else 899 vec = &mikasa_mv; 900 break; 901 case ST_DEC_NORITAKE: 902 if (cpu == EV5_CPU || cpu == EV56_CPU) 903 vec = &noritake_primo_mv; 904 else 905 vec = &noritake_mv; 906 break; 907 case ST_DEC_2100_A500: 908 if (cpu == EV5_CPU || cpu == EV56_CPU) 909 vec = &sable_gamma_mv; 910 else 911 vec = &sable_mv; 912 break; 913 } 914 } 915 return vec; 916 } 917 918 static struct alpha_machine_vector * __init 919 get_sysvec_byname(const char *name) 920 { 921 static struct alpha_machine_vector *all_vecs[] __initdata = 922 { 923 &alcor_mv, 924 &alphabook1_mv, 925 &avanti_mv, 926 &cabriolet_mv, 927 &clipper_mv, 928 &dp264_mv, 929 &eb164_mv, 930 &eb64p_mv, 931 &eb66_mv, 932 &eb66p_mv, 933 &eiger_mv, 934 &lx164_mv, 935 &lynx_mv, 936 &miata_mv, 937 &mikasa_mv, 938 &mikasa_primo_mv, 939 &monet_mv, 940 &nautilus_mv, 941 &noname_mv, 942 &noritake_mv, 943 &noritake_primo_mv, 944 &p2k_mv, 945 &pc164_mv, 946 &privateer_mv, 947 &rawhide_mv, 948 &ruffian_mv, 949 &rx164_mv, 950 &sable_mv, 951 &sable_gamma_mv, 952 &shark_mv, 953 &sx164_mv, 954 &takara_mv, 955 &webbrick_mv, 956 &wildfire_mv, 957 &xl_mv, 958 &xlt_mv 959 }; 960 961 size_t i; 962 963 for (i = 0; i < ARRAY_SIZE(all_vecs); ++i) { 964 struct alpha_machine_vector *mv = all_vecs[i]; 965 if (strcasecmp(mv->vector_name, name) == 0) 966 return mv; 967 } 968 return NULL; 969 } 970 971 static void 972 get_sysnames(unsigned long type, unsigned long variation, unsigned long cpu, 973 char **type_name, char **variation_name) 974 { 975 unsigned long member; 976 977 /* If not in the tables, make it UNKNOWN, 978 else set type name to family */ 979 if (type < ARRAY_SIZE(systype_names)) { 980 *type_name = systype_names[type]; 981 } else if ((type > ST_API_BIAS) && 982 (type - ST_API_BIAS) < ARRAY_SIZE(api_names)) { 983 *type_name = api_names[type - ST_API_BIAS]; 984 } else if ((type > ST_UNOFFICIAL_BIAS) && 985 (type - ST_UNOFFICIAL_BIAS) < ARRAY_SIZE(unofficial_names)) { 986 *type_name = unofficial_names[type - ST_UNOFFICIAL_BIAS]; 987 } else { 988 *type_name = sys_unknown; 989 *variation_name = sys_unknown; 990 return; 991 } 992 993 /* Set variation to "0"; if variation is zero, done. 
	*variation_name = systype_names[0];
	if (variation == 0) {
		return;
	}

	member = (variation >> 10) & 0x3f; /* member ID is a bit-field */

	cpu &= 0xffffffff; /* make it usable */

	switch (type) { /* select by family */
	default: /* default to variation "0" for now */
		break;
	case ST_DEC_EB164:
		if (member >= ARRAY_SIZE(eb164_indices))
			break;
		*variation_name = eb164_names[eb164_indices[member]];
		/* PC164 may show as EB164 variation, but with EV56 CPU,
		   so, since no true EB164 had anything but EV5...  */
		if (eb164_indices[member] == 0 && cpu == EV56_CPU)
			*variation_name = eb164_names[1]; /* make it PC164 */
		break;
	case ST_DEC_ALCOR:
		if (member < ARRAY_SIZE(alcor_indices))
			*variation_name = alcor_names[alcor_indices[member]];
		break;
	case ST_DEC_EB64P:
		if (member < ARRAY_SIZE(eb64p_indices))
			*variation_name = eb64p_names[eb64p_indices[member]];
		break;
	case ST_DEC_EB66:
		if (member < ARRAY_SIZE(eb66_indices))
			*variation_name = eb66_names[eb66_indices[member]];
		break;
	case ST_DEC_MARVEL:
		if (member < ARRAY_SIZE(marvel_indices))
			*variation_name = marvel_names[marvel_indices[member]];
		break;
	case ST_DEC_RAWHIDE:
		if (member < ARRAY_SIZE(rawhide_indices))
			*variation_name = rawhide_names[rawhide_indices[member]];
		break;
	case ST_DEC_TITAN:
		*variation_name = titan_names[0];	/* default */
		if (member < ARRAY_SIZE(titan_indices))
			*variation_name = titan_names[titan_indices[member]];
		break;
	case ST_DEC_TSUNAMI:
		if (member < ARRAY_SIZE(tsunami_indices))
			*variation_name = tsunami_names[tsunami_indices[member]];
		break;
	}
}

/*
 * A change was made to the HWRPB via an ECO and the following code
 * tracks a part of the ECO.  In HWRPB versions less than 5, the ECO
 * was not implemented in the console firmware.  If it's revision 5 or
 * greater we can get the name of the platform as an ASCII string from
 * the HWRPB.  That's what this function does.  It checks the revision
 * level and if the string is in the HWRPB it returns the address of
 * the string--a pointer to the name of the platform.
 *
 * Returns:
 *      - Pointer to an ASCII string if it's in the HWRPB
 *      - Pointer to a blank string if the data is not in the HWRPB.
 */

static char *
platform_string(void)
{
	struct dsr_struct *dsr;
	static char unk_system_string[] = "N/A";

	/* Go to the console for the string pointer.
	 * If the rpb_vers is not 5 or greater the rpb
	 * is old and does not have this data in it.
	 */
	if (hwrpb->revision < 5)
		return (unk_system_string);
	else {
		/* The Dynamic System Recognition struct
		 * has the system platform name starting
		 * after the character count of the string.
		 */
		dsr = ((struct dsr_struct *)
		       ((char *)hwrpb + hwrpb->dsr_offset));
		return ((char *)dsr + (dsr->sysname_off +
				       sizeof(long)));
	}
}

static int
get_nr_processors(struct percpu_struct *cpubase, unsigned long num)
{
	struct percpu_struct *cpu;
	unsigned long i;
	int count = 0;

	for (i = 0; i < num; i++) {
		cpu = (struct percpu_struct *)
			((char *)cpubase + i*hwrpb->processor_size);
		if ((cpu->flags & 0x1cc) == 0x1cc)
			count++;
	}
	return count;
}

static void
show_cache_size (struct seq_file *f, const char *which, int shape)
{
	if (shape == -1)
		seq_printf (f, "%s\t\t: n/a\n", which);
	else if (shape == 0)
		seq_printf (f, "%s\t\t: unknown\n", which);
	else
		seq_printf (f, "%s\t\t: %dK, %d-way, %db line\n",
			    which, shape >> 10, shape & 15,
			    1 << ((shape >> 4) & 15));
}

static int
show_cpuinfo(struct seq_file *f, void *slot)
{
	extern struct unaligned_stat {
		unsigned long count, va, pc;
	} unaligned[2];

	static char cpu_names[][8] = {
		"EV3", "EV4", "Simulate", "LCA4", "EV5", "EV45", "EV56",
		"EV6", "PCA56", "PCA57", "EV67", "EV68CB", "EV68AL",
		"EV68CX", "EV7", "EV79", "EV69"
	};

	struct percpu_struct *cpu = slot;
	unsigned int cpu_index;
	char *cpu_name;
	char *systype_name;
	char *sysvariation_name;
	int nr_processors;
	unsigned long timer_freq;

	cpu_index = (unsigned) (cpu->type - 1);
	cpu_name = "Unknown";
	if (cpu_index < ARRAY_SIZE(cpu_names))
		cpu_name = cpu_names[cpu_index];

	get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
		     cpu->type, &systype_name, &sysvariation_name);

	nr_processors = get_nr_processors(cpu, hwrpb->nr_processors);

#if CONFIG_HZ == 1024 || CONFIG_HZ == 1200
	timer_freq = (100UL * hwrpb->intr_freq) / 4096;
#else
	timer_freq = 100UL * CONFIG_HZ;
#endif

	seq_printf(f, "cpu\t\t\t: Alpha\n"
		      "cpu model\t\t: %s\n"
		      "cpu variation\t\t: %ld\n"
		      "cpu revision\t\t: %ld\n"
		      "cpu serial number\t: %s\n"
		      "system type\t\t: %s\n"
		      "system variation\t: %s\n"
		      "system revision\t\t: %ld\n"
		      "system serial number\t: %s\n"
		      "cycle frequency [Hz]\t: %lu %s\n"
		      "timer frequency [Hz]\t: %lu.%02lu\n"
		      "page size [bytes]\t: %ld\n"
		      "phys. address bits\t: %ld\n"
		      "max. addr. space #\t: %ld\n"
		      "BogoMIPS\t\t: %lu.%02lu\n"
		      "kernel unaligned acc\t: %ld (pc=%lx,va=%lx)\n"
		      "user unaligned acc\t: %ld (pc=%lx,va=%lx)\n"
		      "platform string\t\t: %s\n"
		      "cpus detected\t\t: %d\n",
		       cpu_name, cpu->variation, cpu->revision,
		       (char*)cpu->serial_no,
		       systype_name, sysvariation_name, hwrpb->sys_revision,
		       (char*)hwrpb->ssn,
		       est_cycle_freq ? : hwrpb->cycle_freq,
		       est_cycle_freq ? "est." : "",
: "", 1176 timer_freq / 100, timer_freq % 100, 1177 hwrpb->pagesize, 1178 hwrpb->pa_bits, 1179 hwrpb->max_asn, 1180 loops_per_jiffy / (500000/HZ), 1181 (loops_per_jiffy / (5000/HZ)) % 100, 1182 unaligned[0].count, unaligned[0].pc, unaligned[0].va, 1183 unaligned[1].count, unaligned[1].pc, unaligned[1].va, 1184 platform_string(), nr_processors); 1185 1186 #ifdef CONFIG_SMP 1187 seq_printf(f, "cpus active\t\t: %u\n" 1188 "cpu active mask\t\t: %016lx\n", 1189 num_online_cpus(), cpumask_bits(cpu_possible_mask)[0]); 1190 #endif 1191 1192 show_cache_size (f, "L1 Icache", alpha_l1i_cacheshape); 1193 show_cache_size (f, "L1 Dcache", alpha_l1d_cacheshape); 1194 show_cache_size (f, "L2 cache", alpha_l2_cacheshape); 1195 show_cache_size (f, "L3 cache", alpha_l3_cacheshape); 1196 1197 return 0; 1198 } 1199 1200 static int __init 1201 read_mem_block(int *addr, int stride, int size) 1202 { 1203 long nloads = size / stride, cnt, tmp; 1204 1205 __asm__ __volatile__( 1206 " rpcc %0\n" 1207 "1: ldl %3,0(%2)\n" 1208 " subq %1,1,%1\n" 1209 /* Next two XORs introduce an explicit data dependency between 1210 consecutive loads in the loop, which will give us true load 1211 latency. */ 1212 " xor %3,%2,%2\n" 1213 " xor %3,%2,%2\n" 1214 " addq %2,%4,%2\n" 1215 " bne %1,1b\n" 1216 " rpcc %3\n" 1217 " subl %3,%0,%0\n" 1218 : "=&r" (cnt), "=&r" (nloads), "=&r" (addr), "=&r" (tmp) 1219 : "r" (stride), "1" (nloads), "2" (addr)); 1220 1221 return cnt / (size / stride); 1222 } 1223 1224 #define CSHAPE(totalsize, linesize, assoc) \ 1225 ((totalsize & ~0xff) | (linesize << 4) | assoc) 1226 1227 /* ??? EV5 supports up to 64M, but did the systems with more than 1228 16M of BCACHE ever exist? */ 1229 #define MAX_BCACHE_SIZE 16*1024*1024 1230 1231 /* Note that the offchip caches are direct mapped on all Alphas. */ 1232 static int __init 1233 external_cache_probe(int minsize, int width) 1234 { 1235 int cycles, prev_cycles = 1000000; 1236 int stride = 1 << width; 1237 long size = minsize, maxsize = MAX_BCACHE_SIZE * 2; 1238 1239 if (maxsize > (max_low_pfn + 1) << PAGE_SHIFT) 1240 maxsize = 1 << (ilog2(max_low_pfn + 1) + PAGE_SHIFT); 1241 1242 /* Get the first block cached. */ 1243 read_mem_block(__va(0), stride, size); 1244 1245 while (size < maxsize) { 1246 /* Get an average load latency in cycles. */ 1247 cycles = read_mem_block(__va(0), stride, size); 1248 if (cycles > prev_cycles * 2) { 1249 /* Fine, we exceed the cache. */ 1250 printk("%ldK Bcache detected; load hit latency %d " 1251 "cycles, load miss latency %d cycles\n", 1252 size >> 11, prev_cycles, cycles); 1253 return CSHAPE(size >> 1, width, 1); 1254 } 1255 /* Try to get the next block cached. */ 1256 read_mem_block(__va(size), stride, size); 1257 prev_cycles = cycles; 1258 size <<= 1; 1259 } 1260 return -1; /* No BCACHE found. */ 1261 } 1262 1263 static void __init 1264 determine_cpu_caches (unsigned int cpu_type) 1265 { 1266 int L1I, L1D, L2, L3; 1267 1268 switch (cpu_type) { 1269 case EV4_CPU: 1270 case EV45_CPU: 1271 { 1272 if (cpu_type == EV4_CPU) 1273 L1I = CSHAPE(8*1024, 5, 1); 1274 else 1275 L1I = CSHAPE(16*1024, 5, 1); 1276 L1D = L1I; 1277 L3 = -1; 1278 1279 /* BIU_CTL is a write-only Abox register. PALcode has a 1280 shadow copy, and may be available from some versions 1281 of the CSERVE PALcall. If we can get it, then 1282 1283 unsigned long biu_ctl, size; 1284 size = 128*1024 * (1 << ((biu_ctl >> 28) & 7)); 1285 L2 = CSHAPE (size, 5, 1); 1286 1287 Unfortunately, we can't rely on that. 
1288 */ 1289 L2 = external_cache_probe(128*1024, 5); 1290 break; 1291 } 1292 1293 case LCA4_CPU: 1294 { 1295 unsigned long car, size; 1296 1297 L1I = L1D = CSHAPE(8*1024, 5, 1); 1298 L3 = -1; 1299 1300 car = *(vuip) phys_to_virt (0x120000078UL); 1301 size = 64*1024 * (1 << ((car >> 5) & 7)); 1302 /* No typo -- 8 byte cacheline size. Whodathunk. */ 1303 L2 = (car & 1 ? CSHAPE (size, 3, 1) : -1); 1304 break; 1305 } 1306 1307 case EV5_CPU: 1308 case EV56_CPU: 1309 { 1310 unsigned long sc_ctl, width; 1311 1312 L1I = L1D = CSHAPE(8*1024, 5, 1); 1313 1314 /* Check the line size of the Scache. */ 1315 sc_ctl = *(vulp) phys_to_virt (0xfffff000a8UL); 1316 width = sc_ctl & 0x1000 ? 6 : 5; 1317 L2 = CSHAPE (96*1024, width, 3); 1318 1319 /* BC_CONTROL and BC_CONFIG are write-only IPRs. PALcode 1320 has a shadow copy, and may be available from some versions 1321 of the CSERVE PALcall. If we can get it, then 1322 1323 unsigned long bc_control, bc_config, size; 1324 size = 1024*1024 * (1 << ((bc_config & 7) - 1)); 1325 L3 = (bc_control & 1 ? CSHAPE (size, width, 1) : -1); 1326 1327 Unfortunately, we can't rely on that. 1328 */ 1329 L3 = external_cache_probe(1024*1024, width); 1330 break; 1331 } 1332 1333 case PCA56_CPU: 1334 case PCA57_CPU: 1335 { 1336 if (cpu_type == PCA56_CPU) { 1337 L1I = CSHAPE(16*1024, 6, 1); 1338 L1D = CSHAPE(8*1024, 5, 1); 1339 } else { 1340 L1I = CSHAPE(32*1024, 6, 2); 1341 L1D = CSHAPE(16*1024, 5, 1); 1342 } 1343 L3 = -1; 1344 1345 #if 0 1346 unsigned long cbox_config, size; 1347 1348 cbox_config = *(vulp) phys_to_virt (0xfffff00008UL); 1349 size = 512*1024 * (1 << ((cbox_config >> 12) & 3)); 1350 1351 L2 = ((cbox_config >> 31) & 1 ? CSHAPE (size, 6, 1) : -1); 1352 #else 1353 L2 = external_cache_probe(512*1024, 6); 1354 #endif 1355 break; 1356 } 1357 1358 case EV6_CPU: 1359 case EV67_CPU: 1360 case EV68CB_CPU: 1361 case EV68AL_CPU: 1362 case EV68CX_CPU: 1363 case EV69_CPU: 1364 L1I = L1D = CSHAPE(64*1024, 6, 2); 1365 L2 = external_cache_probe(1024*1024, 6); 1366 L3 = -1; 1367 break; 1368 1369 case EV7_CPU: 1370 case EV79_CPU: 1371 L1I = L1D = CSHAPE(64*1024, 6, 2); 1372 L2 = CSHAPE(7*1024*1024/4, 6, 7); 1373 L3 = -1; 1374 break; 1375 1376 default: 1377 /* Nothing known about this cpu type. */ 1378 L1I = L1D = L2 = L3 = 0; 1379 break; 1380 } 1381 1382 alpha_l1i_cacheshape = L1I; 1383 alpha_l1d_cacheshape = L1D; 1384 alpha_l2_cacheshape = L2; 1385 alpha_l3_cacheshape = L3; 1386 } 1387 1388 /* 1389 * We show only CPU #0 info. 1390 */ 1391 static void * 1392 c_start(struct seq_file *f, loff_t *pos) 1393 { 1394 return *pos ? NULL : (char *)hwrpb + hwrpb->processor_offset; 1395 } 1396 1397 static void * 1398 c_next(struct seq_file *f, void *v, loff_t *pos) 1399 { 1400 (*pos)++; 1401 return NULL; 1402 } 1403 1404 static void 1405 c_stop(struct seq_file *f, void *v) 1406 { 1407 } 1408 1409 const struct seq_operations cpuinfo_op = { 1410 .start = c_start, 1411 .next = c_next, 1412 .stop = c_stop, 1413 .show = show_cpuinfo, 1414 }; 1415 1416 1417 static int 1418 alpha_panic_event(struct notifier_block *this, unsigned long event, void *ptr) 1419 { 1420 #if 1 1421 /* FIXME FIXME FIXME */ 1422 /* If we are using SRM and serial console, just hard halt here. 
	if (alpha_using_srm && srmcons_output)
		__halt();
#endif
	return NOTIFY_DONE;
}

static __init int add_pcspkr(void)
{
	struct platform_device *pd;
	int ret;

	pd = platform_device_alloc("pcspkr", -1);
	if (!pd)
		return -ENOMEM;

	ret = platform_device_add(pd);
	if (ret)
		platform_device_put(pd);

	return ret;
}
device_initcall(add_pcspkr);