/*
 *
 * Common boot and setup code.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/config.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/ide.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/tty.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/ppcdebug.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/system.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/lmb.h>
#include <asm/iseries/it_lp_naca.h>
#include <asm/firmware.h>
#include <asm/systemcfg.h>
#include <asm/xmon.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

/*
 * Here are some early debugging facilities. You can enable one
 * but your kernel will not boot on anything else if you do so
 */

/* This one is for use on LPAR machines that support an HVC console
 * on vterm 0
 */
extern void udbg_init_debug_lpar(void);
/* This one is for use on Apple G5 machines
 */
extern void udbg_init_pmac_realmode(void);
/* That's RTAS panel debug */
extern void call_rtas_display_status_delay(unsigned char c);
/* Here's maple real mode debug */
extern void udbg_init_maple_realmode(void);

#define EARLY_DEBUG_INIT() do {} while(0)

#if 0
#define EARLY_DEBUG_INIT() udbg_init_debug_lpar()
#define EARLY_DEBUG_INIT() udbg_init_maple_realmode()
#define EARLY_DEBUG_INIT() udbg_init_pmac_realmode()
#define EARLY_DEBUG_INIT()						\
	do { udbg_putc = call_rtas_display_status_delay; } while(0)
#endif

/* extern void *stab; */
extern unsigned long klimit;

extern void mm_init_ppc64(void);
extern void stab_initialize(unsigned long stab);
extern void htab_initialize(void);
extern void early_init_devtree(void *flat_dt);
extern void unflatten_device_tree(void);

int have_of = 1;
int boot_cpuid = 0;
int boot_cpuid_phys = 0;
dev_t boot_dev;
u64 ppc64_pft_size;

struct ppc64_caches ppc64_caches;
EXPORT_SYMBOL_GPL(ppc64_caches);

/*
 * These are used in binfmt_elf.c to put aux entries on the stack
 * for each elf executable being started.
 */
int dcache_bsize;
int icache_bsize;
int ucache_bsize;
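
/* These block sizes are normally exported to userland through the ELF
 * auxiliary vector (AT_DCACHEBSIZE, AT_ICACHEBSIZE and AT_UCACHEBSIZE
 * via ARCH_DLINFO in asm/elf.h), so programs can discover the cache
 * block size without parsing the device-tree themselves.
 */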

/* The main machine-dep calls structure
 */
struct machdep_calls ppc_md;
EXPORT_SYMBOL(ppc_md);

#ifdef CONFIG_MAGIC_SYSRQ
unsigned long SYSRQ_KEY;
#endif /* CONFIG_MAGIC_SYSRQ */


static int ppc64_panic_event(struct notifier_block *, unsigned long, void *);
static struct notifier_block ppc64_panic_block = {
	.notifier_call = ppc64_panic_event,
	.priority = INT_MIN /* may not return; must be done last */
};

#ifdef CONFIG_SMP

static int smt_enabled_cmdline;

/* Look for ibm,smt-enabled OF option */
static void check_smt_enabled(void)
{
	struct device_node *dn;
	char *smt_option;

	/* Allow the command line to overrule the OF option */
	if (smt_enabled_cmdline)
		return;

	dn = of_find_node_by_path("/options");

	if (dn) {
		smt_option = (char *)get_property(dn, "ibm,smt-enabled", NULL);

		if (smt_option) {
			if (!strcmp(smt_option, "on"))
				smt_enabled_at_boot = 1;
			else if (!strcmp(smt_option, "off"))
				smt_enabled_at_boot = 0;
		}
	}
}

/* Look for smt-enabled= cmdline option */
static int __init early_smt_enabled(char *p)
{
	smt_enabled_cmdline = 1;

	if (!p)
		return 0;

	if (!strcmp(p, "on") || !strcmp(p, "1"))
		smt_enabled_at_boot = 1;
	else if (!strcmp(p, "off") || !strcmp(p, "0"))
		smt_enabled_at_boot = 0;

	return 0;
}
early_param("smt-enabled", early_smt_enabled);

#else
#define check_smt_enabled()
#endif /* CONFIG_SMP */

extern struct machdep_calls pSeries_md;
extern struct machdep_calls pmac_md;
extern struct machdep_calls maple_md;
extern struct machdep_calls cell_md;
extern struct machdep_calls iseries_md;

/* Ultimately, stuff them in an elf section like initcalls... */
static struct machdep_calls __initdata *machines[] = {
#ifdef CONFIG_PPC_PSERIES
	&pSeries_md,
#endif /* CONFIG_PPC_PSERIES */
#ifdef CONFIG_PPC_PMAC
	&pmac_md,
#endif /* CONFIG_PPC_PMAC */
#ifdef CONFIG_PPC_MAPLE
	&maple_md,
#endif /* CONFIG_PPC_MAPLE */
#ifdef CONFIG_PPC_CELL
	&cell_md,
#endif
#ifdef CONFIG_PPC_ISERIES
	&iseries_md,
#endif
	NULL
};

/*
 * Early initialization entry point. This is called by head.S
 * with MMU translation disabled. We rely on the "feature" of
 * the CPU that ignores the top 2 bits of the address in real
 * mode so we can access kernel globals normally, provided we
 * only toy with things in the RMO region. From here, we do
 * some early parsing of the device-tree to set up our LMB
 * data structures, and allocate & initialize the hash table
 * and segment tables so we can start running with translation
 * enabled.
 *
 * It is this function which will call the probe() callback of
 * the various platform types and copy the matching one to the
 * global ppc_md structure. Your platform can eventually do
 * some very early initializations from the probe() routine, but
 * this is not recommended; be very careful as, for example, the
 * device-tree is not accessible via normal means at this point.
 */
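
/*
 * Concretely: the kernel is linked at 0xC000000000000000 and, with the
 * MMU off, the processor ignores the top two effective address bits, so
 * a reference to a kernel global at, say, 0xC000000000012345 resolves to
 * physical 0x0000000000012345. That only works while the target lies
 * inside the RMO region handed to us by firmware, which is why the code
 * below must not touch anything outside of it.
 */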

void __init early_setup(unsigned long dt_ptr)
{
	struct paca_struct *lpaca = get_paca();
	static struct machdep_calls **mach;

	/*
	 * Enable early debugging if any specified (see top of
	 * this file)
	 */
	EARLY_DEBUG_INIT();

	DBG(" -> early_setup()\n");

	/*
	 * Fill the default DBG level (do we want to keep
	 * that old mechanism around forever ?)
	 */
	ppcdbg_initialize();

	/*
	 * Do early initializations using the flattened device
	 * tree, like retrieving the physical memory map or
	 * calculating/retrieving the hash table size
	 */
	early_init_devtree(__va(dt_ptr));

	/*
	 * Iterate all ppc_md structures until we find the proper
	 * one for the current machine type
	 */
	DBG("Probing machine type for platform %x...\n",
	    systemcfg->platform);

	for (mach = machines; *mach; mach++) {
		if ((*mach)->probe(systemcfg->platform))
			break;
	}
	/* What can we do if we didn't find one? */
	if (*mach == NULL) {
		DBG("No suitable machine found !\n");
		for (;;);
	}
	ppc_md = **mach;

	DBG("Found, Initializing memory management...\n");

	/*
	 * Initialize the MMU Hash table and create the linear mapping
	 * of memory. Has to be done before stab/slb initialization as
	 * this is currently where the page size encoding is obtained
	 */
	htab_initialize();

	/*
	 * Initialize stab / SLB management except on iSeries
	 */
	if (!firmware_has_feature(FW_FEATURE_ISERIES)) {
		if (cpu_has_feature(CPU_FTR_SLB))
			slb_initialize();
		else
			stab_initialize(lpaca->stab_real);
	}

	DBG(" <- early_setup()\n");
}
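
/*
 * A probe() callback is expected to return non-zero when it recognizes
 * the platform value handed to it; a minimal sketch (hypothetical, the
 * real ones live in the platform directories) would look like:
 *
 *	static int pmac_probe(int platform)
 *	{
 *		return platform == PLATFORM_POWERMAC;
 *	}
 *
 * The first entry in machines[] that answers non-zero has its
 * machdep_calls copied wholesale into ppc_md above.
 */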

#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
void smp_release_cpus(void)
{
	extern unsigned long __secondary_hold_spinloop;

	DBG(" -> smp_release_cpus()\n");

	/* All secondary cpus are spinning on a common spinloop, release them
	 * all now so they can start to spin on their individual paca
	 * spinloops. For non SMP kernels, the secondary cpus never get out
	 * of the common spinloop.
	 * This is useless but harmless on iSeries, secondaries are already
	 * waiting on their paca spinloops. */

	__secondary_hold_spinloop = 1;
	mb();

	DBG(" <- smp_release_cpus()\n");
}
#else
#define smp_release_cpus()
#endif /* CONFIG_SMP || CONFIG_KEXEC */

/*
 * Initialize some remaining members of the ppc64_caches and systemcfg
 * structures (at least until we get rid of them completely). This is
 * mostly some cache information about the CPU that will be used by
 * cache flush routines and/or provided to userland
 */
static void __init initialize_cache_info(void)
{
	struct device_node *np;
	unsigned long num_cpus = 0;

	DBG(" -> initialize_cache_info()\n");

	for (np = NULL; (np = of_find_node_by_type(np, "cpu"));) {
		num_cpus += 1;

		/* We're assuming *all* of the CPUs have the same
		 * d-cache and i-cache sizes... -Peter
		 */

		if ( num_cpus == 1 ) {
			u32 *sizep, *lsizep;
			u32 size, lsize;
			const char *dc, *ic;

			/* Then read cache information */
			if (systemcfg->platform == PLATFORM_POWERMAC) {
				dc = "d-cache-block-size";
				ic = "i-cache-block-size";
			} else {
				dc = "d-cache-line-size";
				ic = "i-cache-line-size";
			}

			size = 0;
			lsize = cur_cpu_spec->dcache_bsize;
			sizep = (u32 *)get_property(np, "d-cache-size", NULL);
			if (sizep != NULL)
				size = *sizep;
			lsizep = (u32 *)get_property(np, dc, NULL);
			if (lsizep != NULL)
				lsize = *lsizep;
			if (sizep == NULL || lsizep == NULL)
				DBG("Argh, can't find dcache properties ! "
				    "sizep: %p, lsizep: %p\n", sizep, lsizep);

			systemcfg->dcache_size = ppc64_caches.dsize = size;
			systemcfg->dcache_line_size =
				ppc64_caches.dline_size = lsize;
			ppc64_caches.log_dline_size = __ilog2(lsize);
			ppc64_caches.dlines_per_page = PAGE_SIZE / lsize;

			size = 0;
			lsize = cur_cpu_spec->icache_bsize;
			sizep = (u32 *)get_property(np, "i-cache-size", NULL);
			if (sizep != NULL)
				size = *sizep;
			lsizep = (u32 *)get_property(np, ic, NULL);
			if (lsizep != NULL)
				lsize = *lsizep;
			if (sizep == NULL || lsizep == NULL)
				DBG("Argh, can't find icache properties ! "
				    "sizep: %p, lsizep: %p\n", sizep, lsizep);

			systemcfg->icache_size = ppc64_caches.isize = size;
			systemcfg->icache_line_size =
				ppc64_caches.iline_size = lsize;
			ppc64_caches.log_iline_size = __ilog2(lsize);
			ppc64_caches.ilines_per_page = PAGE_SIZE / lsize;
		}
	}

	/* Add an eye catcher and the systemcfg layout version number */
	strcpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64");
	systemcfg->version.major = SYSTEMCFG_MAJOR;
	systemcfg->version.minor = SYSTEMCFG_MINOR;
	systemcfg->processor = mfspr(SPRN_PVR);

	DBG(" <- initialize_cache_info()\n");
}
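
/*
 * For a typical 128-byte cache block the computation above yields
 * log_dline_size = 7 and, with 4K pages, dlines_per_page = 4096 / 128 =
 * 32 (512 with 64K pages). These values are what the cache flush
 * routines use to step through a page one block at a time.
 */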

static void __init check_for_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	u64 *prop;

	DBG(" -> check_for_initrd()\n");

	if (of_chosen) {
		prop = (u64 *)get_property(of_chosen,
				"linux,initrd-start", NULL);
		if (prop != NULL) {
			initrd_start = (unsigned long)__va(*prop);
			prop = (u64 *)get_property(of_chosen,
					"linux,initrd-end", NULL);
			if (prop != NULL) {
				initrd_end = (unsigned long)__va(*prop);
				initrd_below_start_ok = 1;
			} else
				initrd_start = 0;
		}
	}

	/* If we were passed an initrd, set the ROOT_DEV properly if the
	 * values look sensible. If not, clear the initrd reference.
	 */
	if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE &&
	    initrd_end > initrd_start)
		ROOT_DEV = Root_RAM0;
	else
		initrd_start = initrd_end = 0;

	if (initrd_start)
		printk("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);

	DBG(" <- check_for_initrd()\n");
#endif /* CONFIG_BLK_DEV_INITRD */
}

/*
 * Do some initial setup of the system. The parameters are those which
 * were passed in from the bootloader.
 */
void __init setup_system(void)
{
	DBG(" -> setup_system()\n");

	/*
	 * Unflatten the device-tree passed by prom_init or kexec
	 */
	unflatten_device_tree();

	/*
	 * Fill the ppc64_caches & systemcfg structures with information
	 * retrieved from the device-tree. Needs to be called before
	 * finish_device_tree() since the latter requires some of the
	 * information filled in here to properly parse the interrupt
	 * tree.
	 * It also sets up the cache line sizes, which allows us to call
	 * routines like flush_icache_range (used by the hash init
	 * later on).
	 */
	initialize_cache_info();

#ifdef CONFIG_PPC_RTAS
	/*
	 * Initialize RTAS if available
	 */
	rtas_initialize();
#endif /* CONFIG_PPC_RTAS */

	/*
	 * Check if we have an initrd provided via the device-tree
	 */
	check_for_initrd();

	/*
	 * Do some platform-specific early initializations, including
	 * setting up the hash table pointers, as well as some
	 * interrupt-mapping related options that will be used by
	 * finish_device_tree()
	 */
	ppc_md.init_early();

	/*
	 * "Finish" the device-tree, that is do the actual parsing of
	 * some of the properties like the interrupt map
	 */
	finish_device_tree();

#ifdef CONFIG_BOOTX_TEXT
	init_boot_display();
#endif

	/*
	 * Initialize xmon
	 */
#ifdef CONFIG_XMON_DEFAULT
	xmon_init(1);
#endif
	/*
	 * Register early console
	 */
	register_early_udbg_console();

	/* Save unparsed command line copy for /proc/cmdline */
	strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);

	parse_early_param();

	check_smt_enabled();
	smp_setup_cpu_maps();

	/* Release secondary cpus out of their spinloops at 0x60 now that
	 * we can map physical -> logical CPU ids
	 */
	smp_release_cpus();

	printk("Starting Linux PPC64 %s\n", system_utsname.version);

	printk("-----------------------------------------------------\n");
	printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size);
	printk("ppc64_debug_switch = 0x%lx\n", ppc64_debug_switch);
	printk("ppc64_interrupt_controller = 0x%ld\n", ppc64_interrupt_controller);
	printk("systemcfg = 0x%p\n", systemcfg);
	printk("systemcfg->platform = 0x%x\n", systemcfg->platform);
	printk("systemcfg->processorCount = 0x%lx\n", systemcfg->processorCount);
	printk("systemcfg->physicalMemorySize = 0x%lx\n", systemcfg->physicalMemorySize);
	printk("ppc64_caches.dcache_line_size = 0x%x\n",
	       ppc64_caches.dline_size);
	printk("ppc64_caches.icache_line_size = 0x%x\n",
	       ppc64_caches.iline_size);
	printk("htab_address = 0x%p\n", htab_address);
	printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
	printk("-----------------------------------------------------\n");

	mm_init_ppc64();

	DBG(" <- setup_system()\n");
}

static int ppc64_panic_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	ppc_md.panic((char *)ptr);	/* May not return */
	return NOTIFY_DONE;
}

#ifdef CONFIG_IRQSTACKS
static void __init irqstack_early_init(void)
{
	unsigned int i;

	/*
	 * interrupt stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them.
	 */
	for_each_cpu(i) {
		softirq_ctx[i] = (struct thread_info *)
			__va(lmb_alloc_base(THREAD_SIZE,
					    THREAD_SIZE, 0x10000000));
		hardirq_ctx[i] = (struct thread_info *)
			__va(lmb_alloc_base(THREAD_SIZE,
					    THREAD_SIZE, 0x10000000));
	}
}
#else
#define irqstack_early_init()
#endif

/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled.
 */
static void __init emergency_stack_init(void)
{
	unsigned long limit;
	unsigned int i;

	/*
	 * Emergency stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, we need to get at them in real mode. This means they
	 * must also be within the RMO region.
	 */
	limit = min(0x10000000UL, lmb.rmo_size);

	for_each_cpu(i)
		paca[i].emergency_sp =
		__va(lmb_alloc_base(HW_PAGE_SIZE, 128, limit)) + HW_PAGE_SIZE;
}
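
/*
 * Note that emergency_sp is set to the *top* of the allocation (base +
 * HW_PAGE_SIZE) because the stack grows downwards; the 128-byte
 * alignment of the base satisfies the ABI requirement mentioned above.
 */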

/*
 * Called from setup_arch to initialize the bitmap of available
 * syscalls in the systemcfg page
 */
void __init setup_syscall_map(void)
{
	unsigned int i, count64 = 0, count32 = 0;
	extern unsigned long *sys_call_table;
	extern unsigned long sys_ni_syscall;

	for (i = 0; i < __NR_syscalls; i++) {
		if (sys_call_table[i*2] != sys_ni_syscall) {
			count64++;
			systemcfg->syscall_map_64[i >> 5] |=
				0x80000000UL >> (i & 0x1f);
		}
		if (sys_call_table[i*2+1] != sys_ni_syscall) {
			count32++;
			systemcfg->syscall_map_32[i >> 5] |=
				0x80000000UL >> (i & 0x1f);
		}
	}
	printk(KERN_INFO "Syscall map setup, %d 32-bit and %d 64-bit syscalls\n",
	       count32, count64);
}
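
/*
 * The map is one bit per syscall, MSB first: syscall number i lives in
 * 32-bit word (i >> 5) under mask 0x80000000 >> (i & 0x1f), so for
 * example syscall 33 lands in word 1 with mask 0x40000000. The 64-bit
 * and 32-bit entries are interleaved in sys_call_table, hence the i*2
 * and i*2+1 indexing above.
 */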

/*
 * Called from start_kernel, after lock_kernel has been called.
 * Initializes bootmem, which is used to manage page allocation until
 * mem_init is called.
 */
void __init setup_arch(char **cmdline_p)
{
	extern void do_init_bootmem(void);

	ppc64_boot_msg(0x12, "Setup Arch");

	*cmdline_p = cmd_line;

	/*
	 * Set cache line size based on type of cpu as a default.
	 * Systems with OF can look in the properties on the cpu node(s)
	 * for a possibly more accurate value.
	 */
	dcache_bsize = ppc64_caches.dline_size;
	icache_bsize = ppc64_caches.iline_size;

	/* reboot on panic */
	panic_timeout = 180;

	if (ppc_md.panic)
		notifier_chain_register(&panic_notifier_list, &ppc64_panic_block);

	init_mm.start_code = PAGE_OFFSET;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = klimit;

	irqstack_early_init();
	emergency_stack_init();

	stabs_alloc();

	/* set up the bootmem stuff with available memory */
	do_init_bootmem();
	sparse_init();

	/* initialize the syscall map in systemcfg */
	setup_syscall_map();

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	ppc_md.setup_arch();

	/* Use the default idle loop if the platform hasn't provided one. */
	if (NULL == ppc_md.idle_loop) {
		ppc_md.idle_loop = default_idle;
		printk(KERN_INFO "Using default idle loop\n");
	}

	paging_init();
	ppc64_boot_msg(0x15, "Setup Done");
}


/* ToDo: do something useful if ppc_md is not yet set up. */
#define PPC64_LINUX_FUNCTION 0x0f000000
#define PPC64_IPL_MESSAGE 0xc0000000
#define PPC64_TERM_MESSAGE 0xb0000000

static void ppc64_do_msg(unsigned int src, const char *msg)
{
	if (ppc_md.progress) {
		char buf[128];

		sprintf(buf, "%08X\n", src);
		ppc_md.progress(buf, 0);
		snprintf(buf, 128, "%s", msg);
		ppc_md.progress(buf, 0);
	}
}

/* Print a boot progress message. */
void ppc64_boot_msg(unsigned int src, const char *msg)
{
	ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_IPL_MESSAGE|src, msg);
	printk("[boot]%04x %s\n", src, msg);
}

/* Print a termination message (print only -- does not stop the kernel) */
void ppc64_terminate_msg(unsigned int src, const char *msg)
{
	ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_TERM_MESSAGE|src, msg);
	printk("[terminate]%04x %s\n", src, msg);
}
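
/*
 * The src value handed to ppc_md.progress() encodes the subsystem and
 * message class in its top byte: ppc64_boot_msg(0x12, ...) for instance
 * passes 0x0f000000 | 0xc0000000 | 0x12 = 0xcf000012, while the printk
 * only shows the low 16 bits ("[boot]0012").
 */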

#ifndef CONFIG_PPC_ISERIES
/*
 * This function can be used by platforms to "find" legacy serial ports.
 * It works for "serial" nodes under an "isa" node, and will try to
 * respect the "ibm,aix-loc" property if any. It works with up to 8
 * ports.
 */

#define MAX_LEGACY_SERIAL_PORTS 8
static struct plat_serial8250_port serial_ports[MAX_LEGACY_SERIAL_PORTS+1];
static unsigned int old_serial_count;

void __init generic_find_legacy_serial_ports(u64 *physport,
					     unsigned int *default_speed)
{
	struct device_node *np;
	u32 *sizeprop;

	struct isa_reg_property {
		u32 space;
		u32 address;
		u32 size;
	};
	struct pci_reg_property {
		struct pci_address addr;
		u32 size_hi;
		u32 size_lo;
	};

	DBG(" -> generic_find_legacy_serial_port()\n");

	*physport = 0;
	if (default_speed)
		*default_speed = 0;

	np = of_find_node_by_path("/");
	if (!np)
		return;

	/* First fill our array */
	for (np = NULL; (np = of_find_node_by_type(np, "serial"));) {
		struct device_node *isa, *pci;
		struct isa_reg_property *reg;
		unsigned long phys_size, addr_size, io_base;
		u32 *rangesp;
		u32 *interrupts, *clk, *spd;
		char *typep;
		int index, rlen, rentsize;

		/* Ok, first check if it's under an "isa" parent */
		isa = of_get_parent(np);
		if (!isa || strcmp(isa->name, "isa")) {
			DBG("%s: no isa parent found\n", np->full_name);
			continue;
		}

		/* Now look for an "ibm,aix-loc" property that gives us
		 * ordering if any...
		 */
		typep = (char *)get_property(np, "ibm,aix-loc", NULL);

		/* Get the ISA port number */
		reg = (struct isa_reg_property *)get_property(np, "reg", NULL);
		if (reg == NULL)
			goto next_port;
		/* We assume the interrupt number isn't translated ... */
		interrupts = (u32 *)get_property(np, "interrupts", NULL);
		/* get clock freq. if present */
		clk = (u32 *)get_property(np, "clock-frequency", NULL);
		/* get default speed if present */
		spd = (u32 *)get_property(np, "current-speed", NULL);
		/* Default to locate at end of array */
		index = old_serial_count; /* end of the array by default */

		/* If we have a location index, then use it */
		if (typep && *typep == 'S') {
			index = simple_strtol(typep+1, NULL, 0) - 1;
			/* if index is out of range, use end of array instead */
			if (index >= MAX_LEGACY_SERIAL_PORTS)
				index = old_serial_count;
			/* if our index is still out of range, that means the
			 * array is full; we could scan for a free slot but it
			 * makes little sense to bother, just skip the port
			 */
			if (index >= MAX_LEGACY_SERIAL_PORTS)
				goto next_port;
			if (index >= old_serial_count)
				old_serial_count = index + 1;
			/* Check if a port has already claimed our slot */
			if (serial_ports[index].iobase != 0) {
				/* if we still have some room, move it,
				 * else override */
				if (old_serial_count < MAX_LEGACY_SERIAL_PORTS) {
					DBG("Moved legacy port %d -> %d\n", index,
					    old_serial_count);
					serial_ports[old_serial_count++] =
						serial_ports[index];
				} else {
					DBG("Replacing legacy port %d\n", index);
				}
			}
		}
		if (index >= MAX_LEGACY_SERIAL_PORTS)
			goto next_port;
		if (index >= old_serial_count)
			old_serial_count = index + 1;

		/* Now fill the entry */
		memset(&serial_ports[index], 0,
		       sizeof(struct plat_serial8250_port));
		serial_ports[index].uartclk = clk ? *clk : BASE_BAUD * 16;
		serial_ports[index].iobase = reg->address;
		serial_ports[index].irq = interrupts ? interrupts[0] : 0;
		serial_ports[index].flags = ASYNC_BOOT_AUTOCONF;

		DBG("Added legacy port, index: %d, port: %x, irq: %d, clk: %d\n",
		    index,
		    serial_ports[index].iobase,
		    serial_ports[index].irq,
		    serial_ports[index].uartclk);

		/* Get phys address of IO reg for port 1 */
		if (index != 0)
			goto next_port;

		pci = of_get_parent(isa);
		if (!pci) {
			DBG("%s: no pci parent found\n", np->full_name);
			goto next_port;
		}

		rangesp = (u32 *)get_property(pci, "ranges", &rlen);
		if (rangesp == NULL) {
			of_node_put(pci);
			goto next_port;
		}
		rlen /= 4;

		/* we need the #size-cells of the PCI bridge node itself */
		phys_size = 1;
		sizeprop = (u32 *)get_property(pci, "#size-cells", NULL);
		if (sizeprop != NULL)
			phys_size = *sizeprop;
		/* we need the parent #addr-cells */
		addr_size = prom_n_addr_cells(pci);
		rentsize = 3 + addr_size + phys_size;
		io_base = 0;
		for (; rlen >= rentsize; rlen -= rentsize, rangesp += rentsize) {
			if (((rangesp[0] >> 24) & 0x3) != 1)
				continue;	/* not IO space */
			io_base = rangesp[3];
			if (addr_size == 2)
				io_base = (io_base << 32) | rangesp[4];
		}
		if (io_base != 0) {
			*physport = io_base + reg->address;
			if (default_speed && spd)
				*default_speed = *spd;
		}
		of_node_put(pci);
	next_port:
		of_node_put(isa);
	}

	DBG(" <- generic_find_legacy_serial_port()\n");
}
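
/*
 * Two worked examples for the parsing above: an "ibm,aix-loc" value of
 * "S2" places the port at index 1 (simple_strtol("2") - 1), and with
 * the child (PCI) address taken as 3 cells, 2 parent address cells and
 * #size-cells = 2 on the bridge, each "ranges" entry is 3 + 2 + 2 = 7
 * cells, of which cells 3 and 4 hold the 64-bit CPU physical base of
 * the bridge's IO window that gets added to the port's ISA reg address.
 */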

static struct platform_device serial_device = {
	.name	= "serial8250",
	.id	= PLAT8250_DEV_PLATFORM,
	.dev	= {
		.platform_data = serial_ports,
	},
};

static int __init serial_dev_init(void)
{
	return platform_device_register(&serial_device);
}
arch_initcall(serial_dev_init);

#endif /* CONFIG_PPC_ISERIES */

int check_legacy_ioport(unsigned long base_port)
{
	if (ppc_md.check_legacy_ioport == NULL)
		return 0;
	return ppc_md.check_legacy_ioport(base_port);
}
EXPORT_SYMBOL(check_legacy_ioport);

#ifdef CONFIG_XMON
static int __init early_xmon(char *p)
{
	/* ensure xmon is enabled */
	if (p) {
		if (strncmp(p, "on", 2) == 0)
			xmon_init(1);
		if (strncmp(p, "off", 3) == 0)
			xmon_init(0);
		if (strncmp(p, "early", 5) != 0)
			return 0;
	}
	xmon_init(1);
	debugger(NULL);

	return 0;
}
early_param("xmon", early_xmon);
#endif

void cpu_die(void)
{
	if (ppc_md.cpu_die)
		ppc_md.cpu_die();
}