1 /* 2 * Procedures for interfacing to Open Firmware. 3 * 4 * Paul Mackerras August 1996. 5 * Copyright (C) 1996-2005 Paul Mackerras. 6 * 7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner. 8 * {engebret|bergner}@us.ibm.com 9 * 10 * This program is free software; you can redistribute it and/or 11 * modify it under the terms of the GNU General Public License 12 * as published by the Free Software Foundation; either version 13 * 2 of the License, or (at your option) any later version. 14 */ 15 16 #undef DEBUG_PROM 17 18 /* we cannot use FORTIFY as it brings in new symbols */ 19 #define __NO_FORTIFY 20 21 #include <stdarg.h> 22 #include <linux/kernel.h> 23 #include <linux/string.h> 24 #include <linux/init.h> 25 #include <linux/threads.h> 26 #include <linux/spinlock.h> 27 #include <linux/types.h> 28 #include <linux/pci.h> 29 #include <linux/proc_fs.h> 30 #include <linux/stringify.h> 31 #include <linux/delay.h> 32 #include <linux/initrd.h> 33 #include <linux/bitops.h> 34 #include <asm/prom.h> 35 #include <asm/rtas.h> 36 #include <asm/page.h> 37 #include <asm/processor.h> 38 #include <asm/irq.h> 39 #include <asm/io.h> 40 #include <asm/smp.h> 41 #include <asm/mmu.h> 42 #include <asm/pgtable.h> 43 #include <asm/iommu.h> 44 #include <asm/btext.h> 45 #include <asm/sections.h> 46 #include <asm/machdep.h> 47 #include <asm/opal.h> 48 #include <asm/asm-prototypes.h> 49 50 #include <linux/linux_logo.h> 51 52 /* 53 * Eventually bump that one up 54 */ 55 #define DEVTREE_CHUNK_SIZE 0x100000 56 57 /* 58 * This is the size of the local memory reserve map that gets copied 59 * into the boot params passed to the kernel. That size is totally 60 * flexible as the kernel just reads the list until it encounters an 61 * entry with size 0, so it can be changed without breaking binary 62 * compatibility 63 */ 64 #define MEM_RESERVE_MAP_SIZE 8 65 66 /* 67 * prom_init() is called very early on, before the kernel text 68 * and data have been mapped to KERNELBASE. At this point the code 69 * is running at whatever address it has been loaded at. 70 * On ppc32 we compile with -mrelocatable, which means that references 71 * to extern and static variables get relocated automatically. 72 * ppc64 objects are always relocatable, we just need to relocate the 73 * TOC. 74 * 75 * Because OF may have mapped I/O devices into the area starting at 76 * KERNELBASE, particularly on CHRP machines, we can't safely call 77 * OF once the kernel has been mapped to KERNELBASE. Therefore all 78 * OF calls must be done within prom_init(). 79 * 80 * ADDR is used in calls to call_prom. The 4th and following 81 * arguments to call_prom should be 32-bit values. 82 * On ppc64, 64 bit values are truncated to 32 bits (and 83 * fortunately don't get interpreted as two arguments). 84 */ 85 #define ADDR(x) (u32)(unsigned long)(x) 86 87 #ifdef CONFIG_PPC64 88 #define OF_WORKAROUNDS 0 89 #else 90 #define OF_WORKAROUNDS of_workarounds 91 int of_workarounds; 92 #endif 93 94 #define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */ 95 #define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */ 96 97 #define PROM_BUG() do { \ 98 prom_printf("kernel BUG at %s line 0x%x!\n", \ 99 __FILE__, __LINE__); \ 100 __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \ 101 } while (0) 102 103 #ifdef DEBUG_PROM 104 #define prom_debug(x...) prom_printf(x) 105 #else 106 #define prom_debug(x...) 
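/*
 * Note: with DEBUG_PROM unset, prom_debug() expands to nothing, so its
 * format string and arguments are discarded at preprocessing time.
 */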
107 #endif 108 109 110 typedef u32 prom_arg_t; 111 112 struct prom_args { 113 __be32 service; 114 __be32 nargs; 115 __be32 nret; 116 __be32 args[10]; 117 }; 118 119 struct prom_t { 120 ihandle root; 121 phandle chosen; 122 int cpu; 123 ihandle stdout; 124 ihandle mmumap; 125 ihandle memory; 126 }; 127 128 struct mem_map_entry { 129 __be64 base; 130 __be64 size; 131 }; 132 133 typedef __be32 cell_t; 134 135 extern void __start(unsigned long r3, unsigned long r4, unsigned long r5, 136 unsigned long r6, unsigned long r7, unsigned long r8, 137 unsigned long r9); 138 139 #ifdef CONFIG_PPC64 140 extern int enter_prom(struct prom_args *args, unsigned long entry); 141 #else 142 static inline int enter_prom(struct prom_args *args, unsigned long entry) 143 { 144 return ((int (*)(struct prom_args *))entry)(args); 145 } 146 #endif 147 148 extern void copy_and_flush(unsigned long dest, unsigned long src, 149 unsigned long size, unsigned long offset); 150 151 /* prom structure */ 152 static struct prom_t __initdata prom; 153 154 static unsigned long prom_entry __initdata; 155 156 #define PROM_SCRATCH_SIZE 256 157 158 static char __initdata of_stdout_device[256]; 159 static char __initdata prom_scratch[PROM_SCRATCH_SIZE]; 160 161 static unsigned long __initdata dt_header_start; 162 static unsigned long __initdata dt_struct_start, dt_struct_end; 163 static unsigned long __initdata dt_string_start, dt_string_end; 164 165 static unsigned long __initdata prom_initrd_start, prom_initrd_end; 166 167 #ifdef CONFIG_PPC64 168 static int __initdata prom_iommu_force_on; 169 static int __initdata prom_iommu_off; 170 static unsigned long __initdata prom_tce_alloc_start; 171 static unsigned long __initdata prom_tce_alloc_end; 172 #endif 173 174 static bool __initdata prom_radix_disable; 175 176 struct platform_support { 177 bool hash_mmu; 178 bool radix_mmu; 179 bool radix_gtse; 180 }; 181 182 /* Platforms codes are now obsolete in the kernel. Now only used within this 183 * file and ultimately gone too. Feel free to change them if you need, they 184 * are not shared with anything outside of this file anymore 185 */ 186 #define PLATFORM_PSERIES 0x0100 187 #define PLATFORM_PSERIES_LPAR 0x0101 188 #define PLATFORM_LPAR 0x0001 189 #define PLATFORM_POWERMAC 0x0400 190 #define PLATFORM_GENERIC 0x0500 191 #define PLATFORM_OPAL 0x0600 192 193 static int __initdata of_platform; 194 195 static char __initdata prom_cmd_line[COMMAND_LINE_SIZE]; 196 197 static unsigned long __initdata prom_memory_limit; 198 199 static unsigned long __initdata alloc_top; 200 static unsigned long __initdata alloc_top_high; 201 static unsigned long __initdata alloc_bottom; 202 static unsigned long __initdata rmo_top; 203 static unsigned long __initdata ram_top; 204 205 static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE]; 206 static int __initdata mem_reserve_cnt; 207 208 static cell_t __initdata regbuf[1024]; 209 210 static bool rtas_has_query_cpu_stopped; 211 212 213 /* 214 * Error results ... some OF calls will return "-1" on error, some 215 * will return 0, some will return either. To simplify, here are 216 * macros to use with any ihandle or phandle return value to check if 217 * it is valid 218 */ 219 220 #define PROM_ERROR (-1u) 221 #define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR) 222 #define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR) 223 224 225 /* This is the one and *ONLY* place where we actually call open 226 * firmware. 
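 * call_prom() marshals the service name, nargs argument cells and nret
 * return cells into a big-endian struct prom_args and enters the firmware
 * client interface through enter_prom().  The first return cell is handed
 * back to the caller (or PROM_ERROR if the call itself failed);
 * call_prom_ret() additionally copies any further return cells into the
 * caller-supplied "rets" array.  A typical lookup looks like
 * call_prom("finddevice", 1, 1, ADDR("/chosen")).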
227 */ 228 229 static int __init call_prom(const char *service, int nargs, int nret, ...) 230 { 231 int i; 232 struct prom_args args; 233 va_list list; 234 235 args.service = cpu_to_be32(ADDR(service)); 236 args.nargs = cpu_to_be32(nargs); 237 args.nret = cpu_to_be32(nret); 238 239 va_start(list, nret); 240 for (i = 0; i < nargs; i++) 241 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t)); 242 va_end(list); 243 244 for (i = 0; i < nret; i++) 245 args.args[nargs+i] = 0; 246 247 if (enter_prom(&args, prom_entry) < 0) 248 return PROM_ERROR; 249 250 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0; 251 } 252 253 static int __init call_prom_ret(const char *service, int nargs, int nret, 254 prom_arg_t *rets, ...) 255 { 256 int i; 257 struct prom_args args; 258 va_list list; 259 260 args.service = cpu_to_be32(ADDR(service)); 261 args.nargs = cpu_to_be32(nargs); 262 args.nret = cpu_to_be32(nret); 263 264 va_start(list, rets); 265 for (i = 0; i < nargs; i++) 266 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t)); 267 va_end(list); 268 269 for (i = 0; i < nret; i++) 270 args.args[nargs+i] = 0; 271 272 if (enter_prom(&args, prom_entry) < 0) 273 return PROM_ERROR; 274 275 if (rets != NULL) 276 for (i = 1; i < nret; ++i) 277 rets[i-1] = be32_to_cpu(args.args[nargs+i]); 278 279 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0; 280 } 281 282 283 static void __init prom_print(const char *msg) 284 { 285 const char *p, *q; 286 287 if (prom.stdout == 0) 288 return; 289 290 for (p = msg; *p != 0; p = q) { 291 for (q = p; *q != 0 && *q != '\n'; ++q) 292 ; 293 if (q > p) 294 call_prom("write", 3, 1, prom.stdout, p, q - p); 295 if (*q == 0) 296 break; 297 ++q; 298 call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2); 299 } 300 } 301 302 303 static void __init prom_print_hex(unsigned long val) 304 { 305 int i, nibbles = sizeof(val)*2; 306 char buf[sizeof(val)*2+1]; 307 308 for (i = nibbles-1; i >= 0; i--) { 309 buf[i] = (val & 0xf) + '0'; 310 if (buf[i] > '9') 311 buf[i] += ('a'-'0'-10); 312 val >>= 4; 313 } 314 buf[nibbles] = '\0'; 315 call_prom("write", 3, 1, prom.stdout, buf, nibbles); 316 } 317 318 /* max number of decimal digits in an unsigned long */ 319 #define UL_DIGITS 21 320 static void __init prom_print_dec(unsigned long val) 321 { 322 int i, size; 323 char buf[UL_DIGITS+1]; 324 325 for (i = UL_DIGITS-1; i >= 0; i--) { 326 buf[i] = (val % 10) + '0'; 327 val = val/10; 328 if (val == 0) 329 break; 330 } 331 /* shift stuff down */ 332 size = UL_DIGITS - i; 333 call_prom("write", 3, 1, prom.stdout, buf+i, size); 334 } 335 336 static void __init prom_printf(const char *format, ...) 
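/*
 * Minimal printf for the firmware console: only %s, %d, %x, %lx, %lu and
 * %ld are interpreted, and '\n' is expanded to "\r\n" on output.
 */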
337 { 338 const char *p, *q, *s; 339 va_list args; 340 unsigned long v; 341 long vs; 342 343 va_start(args, format); 344 for (p = format; *p != 0; p = q) { 345 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q) 346 ; 347 if (q > p) 348 call_prom("write", 3, 1, prom.stdout, p, q - p); 349 if (*q == 0) 350 break; 351 if (*q == '\n') { 352 ++q; 353 call_prom("write", 3, 1, prom.stdout, 354 ADDR("\r\n"), 2); 355 continue; 356 } 357 ++q; 358 if (*q == 0) 359 break; 360 switch (*q) { 361 case 's': 362 ++q; 363 s = va_arg(args, const char *); 364 prom_print(s); 365 break; 366 case 'x': 367 ++q; 368 v = va_arg(args, unsigned long); 369 prom_print_hex(v); 370 break; 371 case 'd': 372 ++q; 373 vs = va_arg(args, int); 374 if (vs < 0) { 375 prom_print("-"); 376 vs = -vs; 377 } 378 prom_print_dec(vs); 379 break; 380 case 'l': 381 ++q; 382 if (*q == 0) 383 break; 384 else if (*q == 'x') { 385 ++q; 386 v = va_arg(args, unsigned long); 387 prom_print_hex(v); 388 } else if (*q == 'u') { /* '%lu' */ 389 ++q; 390 v = va_arg(args, unsigned long); 391 prom_print_dec(v); 392 } else if (*q == 'd') { /* %ld */ 393 ++q; 394 vs = va_arg(args, long); 395 if (vs < 0) { 396 prom_print("-"); 397 vs = -vs; 398 } 399 prom_print_dec(vs); 400 } 401 break; 402 } 403 } 404 va_end(args); 405 } 406 407 408 static unsigned int __init prom_claim(unsigned long virt, unsigned long size, 409 unsigned long align) 410 { 411 412 if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) { 413 /* 414 * Old OF requires we claim physical and virtual separately 415 * and then map explicitly (assuming virtual mode) 416 */ 417 int ret; 418 prom_arg_t result; 419 420 ret = call_prom_ret("call-method", 5, 2, &result, 421 ADDR("claim"), prom.memory, 422 align, size, virt); 423 if (ret != 0 || result == -1) 424 return -1; 425 ret = call_prom_ret("call-method", 5, 2, &result, 426 ADDR("claim"), prom.mmumap, 427 align, size, virt); 428 if (ret != 0) { 429 call_prom("call-method", 4, 1, ADDR("release"), 430 prom.memory, size, virt); 431 return -1; 432 } 433 /* the 0x12 is M (coherence) + PP == read/write */ 434 call_prom("call-method", 6, 1, 435 ADDR("map"), prom.mmumap, 0x12, size, virt, virt); 436 return virt; 437 } 438 return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size, 439 (prom_arg_t)align); 440 } 441 442 static void __init __attribute__((noreturn)) prom_panic(const char *reason) 443 { 444 prom_print(reason); 445 /* Do not call exit because it clears the screen on pmac 446 * it also causes some sort of double-fault on early pmacs */ 447 if (of_platform == PLATFORM_POWERMAC) 448 asm("trap\n"); 449 450 /* ToDo: should put up an SRC here on pSeries */ 451 call_prom("exit", 0, 0); 452 453 for (;;) /* should never get here */ 454 ; 455 } 456 457 458 static int __init prom_next_node(phandle *nodep) 459 { 460 phandle node; 461 462 if ((node = *nodep) != 0 463 && (*nodep = call_prom("child", 1, 1, node)) != 0) 464 return 1; 465 if ((*nodep = call_prom("peer", 1, 1, node)) != 0) 466 return 1; 467 for (;;) { 468 if ((node = call_prom("parent", 1, 1, node)) == 0) 469 return 0; 470 if ((*nodep = call_prom("peer", 1, 1, node)) != 0) 471 return 1; 472 } 473 } 474 475 static inline int prom_getprop(phandle node, const char *pname, 476 void *value, size_t valuelen) 477 { 478 return call_prom("getprop", 4, 1, node, ADDR(pname), 479 (u32)(unsigned long) value, (u32) valuelen); 480 } 481 482 static inline int prom_getproplen(phandle node, const char *pname) 483 { 484 return call_prom("getproplen", 2, 1, node, ADDR(pname)); 485 } 486 487 static void 
add_string(char **str, const char *q) 488 { 489 char *p = *str; 490 491 while (*q) 492 *p++ = *q++; 493 *p++ = ' '; 494 *str = p; 495 } 496 497 static char *tohex(unsigned int x) 498 { 499 static char digits[] = "0123456789abcdef"; 500 static char result[9]; 501 int i; 502 503 result[8] = 0; 504 i = 8; 505 do { 506 --i; 507 result[i] = digits[x & 0xf]; 508 x >>= 4; 509 } while (x != 0 && i > 0); 510 return &result[i]; 511 } 512 513 static int __init prom_setprop(phandle node, const char *nodename, 514 const char *pname, void *value, size_t valuelen) 515 { 516 char cmd[256], *p; 517 518 if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL)) 519 return call_prom("setprop", 4, 1, node, ADDR(pname), 520 (u32)(unsigned long) value, (u32) valuelen); 521 522 /* gah... setprop doesn't work on longtrail, have to use interpret */ 523 p = cmd; 524 add_string(&p, "dev"); 525 add_string(&p, nodename); 526 add_string(&p, tohex((u32)(unsigned long) value)); 527 add_string(&p, tohex(valuelen)); 528 add_string(&p, tohex(ADDR(pname))); 529 add_string(&p, tohex(strlen(pname))); 530 add_string(&p, "property"); 531 *p = 0; 532 return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd); 533 } 534 535 /* We can't use the standard versions because of relocation headaches. */ 536 #define isxdigit(c) (('0' <= (c) && (c) <= '9') \ 537 || ('a' <= (c) && (c) <= 'f') \ 538 || ('A' <= (c) && (c) <= 'F')) 539 540 #define isdigit(c) ('0' <= (c) && (c) <= '9') 541 #define islower(c) ('a' <= (c) && (c) <= 'z') 542 #define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c)) 543 544 static unsigned long prom_strtoul(const char *cp, const char **endp) 545 { 546 unsigned long result = 0, base = 10, value; 547 548 if (*cp == '0') { 549 base = 8; 550 cp++; 551 if (toupper(*cp) == 'X') { 552 cp++; 553 base = 16; 554 } 555 } 556 557 while (isxdigit(*cp) && 558 (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) { 559 result = result * base + value; 560 cp++; 561 } 562 563 if (endp) 564 *endp = cp; 565 566 return result; 567 } 568 569 static unsigned long prom_memparse(const char *ptr, const char **retptr) 570 { 571 unsigned long ret = prom_strtoul(ptr, retptr); 572 int shift = 0; 573 574 /* 575 * We can't use a switch here because GCC *may* generate a 576 * jump table which won't work, because we're not running at 577 * the address we're linked at. 
578 */ 579 if ('G' == **retptr || 'g' == **retptr) 580 shift = 30; 581 582 if ('M' == **retptr || 'm' == **retptr) 583 shift = 20; 584 585 if ('K' == **retptr || 'k' == **retptr) 586 shift = 10; 587 588 if (shift) { 589 ret <<= shift; 590 (*retptr)++; 591 } 592 593 return ret; 594 } 595 596 /* 597 * Early parsing of the command line passed to the kernel, used for 598 * "mem=x" and the options that affect the iommu 599 */ 600 static void __init early_cmdline_parse(void) 601 { 602 const char *opt; 603 604 char *p; 605 int l = 0; 606 607 prom_cmd_line[0] = 0; 608 p = prom_cmd_line; 609 if ((long)prom.chosen > 0) 610 l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1); 611 #ifdef CONFIG_CMDLINE 612 if (l <= 0 || p[0] == '\0') /* dbl check */ 613 strlcpy(prom_cmd_line, 614 CONFIG_CMDLINE, sizeof(prom_cmd_line)); 615 #endif /* CONFIG_CMDLINE */ 616 prom_printf("command line: %s\n", prom_cmd_line); 617 618 #ifdef CONFIG_PPC64 619 opt = strstr(prom_cmd_line, "iommu="); 620 if (opt) { 621 prom_printf("iommu opt is: %s\n", opt); 622 opt += 6; 623 while (*opt && *opt == ' ') 624 opt++; 625 if (!strncmp(opt, "off", 3)) 626 prom_iommu_off = 1; 627 else if (!strncmp(opt, "force", 5)) 628 prom_iommu_force_on = 1; 629 } 630 #endif 631 opt = strstr(prom_cmd_line, "mem="); 632 if (opt) { 633 opt += 4; 634 prom_memory_limit = prom_memparse(opt, (const char **)&opt); 635 #ifdef CONFIG_PPC64 636 /* Align to 16 MB == size of ppc64 large page */ 637 prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000); 638 #endif 639 } 640 641 opt = strstr(prom_cmd_line, "disable_radix"); 642 if (opt) { 643 prom_debug("Radix disabled from cmdline\n"); 644 prom_radix_disable = true; 645 } 646 } 647 648 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) 649 /* 650 * The architecture vector has an array of PVR mask/value pairs, 651 * followed by # option vectors - 1, followed by the option vectors. 652 * 653 * See prom.h for the definition of the bits specified in the 654 * architecture vector. 655 */ 656 657 /* Firmware expects the value to be n - 1, where n is the # of vectors */ 658 #define NUM_VECTORS(n) ((n) - 1) 659 660 /* 661 * Firmware expects 1 + n - 2, where n is the length of the option vector in 662 * bytes. The 1 accounts for the length byte itself, the - 2 .. ? 
663 */ 664 #define VECTOR_LENGTH(n) (1 + (n) - 2) 665 666 struct option_vector1 { 667 u8 byte1; 668 u8 arch_versions; 669 u8 arch_versions3; 670 } __packed; 671 672 struct option_vector2 { 673 u8 byte1; 674 __be16 reserved; 675 __be32 real_base; 676 __be32 real_size; 677 __be32 virt_base; 678 __be32 virt_size; 679 __be32 load_base; 680 __be32 min_rma; 681 __be32 min_load; 682 u8 min_rma_percent; 683 u8 max_pft_size; 684 } __packed; 685 686 struct option_vector3 { 687 u8 byte1; 688 u8 byte2; 689 } __packed; 690 691 struct option_vector4 { 692 u8 byte1; 693 u8 min_vp_cap; 694 } __packed; 695 696 struct option_vector5 { 697 u8 byte1; 698 u8 byte2; 699 u8 byte3; 700 u8 cmo; 701 u8 associativity; 702 u8 bin_opts; 703 u8 micro_checkpoint; 704 u8 reserved0; 705 __be32 max_cpus; 706 __be16 papr_level; 707 __be16 reserved1; 708 u8 platform_facilities; 709 u8 reserved2; 710 __be16 reserved3; 711 u8 subprocessors; 712 u8 byte22; 713 u8 intarch; 714 u8 mmu; 715 u8 hash_ext; 716 u8 radix_ext; 717 } __packed; 718 719 struct option_vector6 { 720 u8 reserved; 721 u8 secondary_pteg; 722 u8 os_name; 723 } __packed; 724 725 struct ibm_arch_vec { 726 struct { u32 mask, val; } pvrs[12]; 727 728 u8 num_vectors; 729 730 u8 vec1_len; 731 struct option_vector1 vec1; 732 733 u8 vec2_len; 734 struct option_vector2 vec2; 735 736 u8 vec3_len; 737 struct option_vector3 vec3; 738 739 u8 vec4_len; 740 struct option_vector4 vec4; 741 742 u8 vec5_len; 743 struct option_vector5 vec5; 744 745 u8 vec6_len; 746 struct option_vector6 vec6; 747 } __packed; 748 749 struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = { 750 .pvrs = { 751 { 752 .mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */ 753 .val = cpu_to_be32(0x003a0000), 754 }, 755 { 756 .mask = cpu_to_be32(0xffff0000), /* POWER6 */ 757 .val = cpu_to_be32(0x003e0000), 758 }, 759 { 760 .mask = cpu_to_be32(0xffff0000), /* POWER7 */ 761 .val = cpu_to_be32(0x003f0000), 762 }, 763 { 764 .mask = cpu_to_be32(0xffff0000), /* POWER8E */ 765 .val = cpu_to_be32(0x004b0000), 766 }, 767 { 768 .mask = cpu_to_be32(0xffff0000), /* POWER8NVL */ 769 .val = cpu_to_be32(0x004c0000), 770 }, 771 { 772 .mask = cpu_to_be32(0xffff0000), /* POWER8 */ 773 .val = cpu_to_be32(0x004d0000), 774 }, 775 { 776 .mask = cpu_to_be32(0xffff0000), /* POWER9 */ 777 .val = cpu_to_be32(0x004e0000), 778 }, 779 { 780 .mask = cpu_to_be32(0xffffffff), /* all 3.00-compliant */ 781 .val = cpu_to_be32(0x0f000005), 782 }, 783 { 784 .mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */ 785 .val = cpu_to_be32(0x0f000004), 786 }, 787 { 788 .mask = cpu_to_be32(0xffffffff), /* all 2.06-compliant */ 789 .val = cpu_to_be32(0x0f000003), 790 }, 791 { 792 .mask = cpu_to_be32(0xffffffff), /* all 2.05-compliant */ 793 .val = cpu_to_be32(0x0f000002), 794 }, 795 { 796 .mask = cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */ 797 .val = cpu_to_be32(0x0f000001), 798 }, 799 }, 800 801 .num_vectors = NUM_VECTORS(6), 802 803 .vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)), 804 .vec1 = { 805 .byte1 = 0, 806 .arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 | 807 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07, 808 .arch_versions3 = OV1_PPC_3_00, 809 }, 810 811 .vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)), 812 /* option vector 2: Open Firmware options supported */ 813 .vec2 = { 814 .byte1 = OV2_REAL_MODE, 815 .reserved = 0, 816 .real_base = cpu_to_be32(0xffffffff), 817 .real_size = cpu_to_be32(0xffffffff), 818 .virt_base = cpu_to_be32(0xffffffff), 819 
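		/*
		 * 0xffffffff in these base/size fields follows the same
		 * "don't care" convention as the ~0U values in the fake ELF
		 * CHRP note further down: no particular placement is being
		 * requested from firmware.
		 */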
.virt_size = cpu_to_be32(0xffffffff), 820 .load_base = cpu_to_be32(0xffffffff), 821 .min_rma = cpu_to_be32(512), /* 512MB min RMA */ 822 .min_load = cpu_to_be32(0xffffffff), /* full client load */ 823 .min_rma_percent = 0, /* min RMA percentage of total RAM */ 824 .max_pft_size = 48, /* max log_2(hash table size) */ 825 }, 826 827 .vec3_len = VECTOR_LENGTH(sizeof(struct option_vector3)), 828 /* option vector 3: processor options supported */ 829 .vec3 = { 830 .byte1 = 0, /* don't ignore, don't halt */ 831 .byte2 = OV3_FP | OV3_VMX | OV3_DFP, 832 }, 833 834 .vec4_len = VECTOR_LENGTH(sizeof(struct option_vector4)), 835 /* option vector 4: IBM PAPR implementation */ 836 .vec4 = { 837 .byte1 = 0, /* don't halt */ 838 .min_vp_cap = OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */ 839 }, 840 841 .vec5_len = VECTOR_LENGTH(sizeof(struct option_vector5)), 842 /* option vector 5: PAPR/OF options */ 843 .vec5 = { 844 .byte1 = 0, /* don't ignore, don't halt */ 845 .byte2 = OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) | 846 OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) | 847 #ifdef CONFIG_PCI_MSI 848 /* PCIe/MSI support. Without MSI full PCIe is not supported */ 849 OV5_FEAT(OV5_MSI), 850 #else 851 0, 852 #endif 853 .byte3 = 0, 854 .cmo = 855 #ifdef CONFIG_PPC_SMLPAR 856 OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO), 857 #else 858 0, 859 #endif 860 .associativity = OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN), 861 .bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT), 862 .micro_checkpoint = 0, 863 .reserved0 = 0, 864 .max_cpus = cpu_to_be32(NR_CPUS), /* number of cores supported */ 865 .papr_level = 0, 866 .reserved1 = 0, 867 .platform_facilities = OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | OV5_FEAT(OV5_PFO_HW_842), 868 .reserved2 = 0, 869 .reserved3 = 0, 870 .subprocessors = 1, 871 .intarch = 0, 872 .mmu = 0, 873 .hash_ext = 0, 874 .radix_ext = 0, 875 }, 876 877 /* option vector 6: IBM PAPR hints */ 878 .vec6_len = VECTOR_LENGTH(sizeof(struct option_vector6)), 879 .vec6 = { 880 .reserved = 0, 881 .secondary_pteg = 0, 882 .os_name = OV6_LINUX, 883 }, 884 }; 885 886 /* Old method - ELF header with PT_NOTE sections only works on BE */ 887 #ifdef __BIG_ENDIAN__ 888 static struct fake_elf { 889 Elf32_Ehdr elfhdr; 890 Elf32_Phdr phdr[2]; 891 struct chrpnote { 892 u32 namesz; 893 u32 descsz; 894 u32 type; 895 char name[8]; /* "PowerPC" */ 896 struct chrpdesc { 897 u32 real_mode; 898 u32 real_base; 899 u32 real_size; 900 u32 virt_base; 901 u32 virt_size; 902 u32 load_base; 903 } chrpdesc; 904 } chrpnote; 905 struct rpanote { 906 u32 namesz; 907 u32 descsz; 908 u32 type; 909 char name[24]; /* "IBM,RPA-Client-Config" */ 910 struct rpadesc { 911 u32 lpar_affinity; 912 u32 min_rmo_size; 913 u32 min_rmo_percent; 914 u32 max_pft_size; 915 u32 splpar; 916 u32 min_load; 917 u32 new_mem_def; 918 u32 ignore_me; 919 } rpadesc; 920 } rpanote; 921 } fake_elf = { 922 .elfhdr = { 923 .e_ident = { 0x7f, 'E', 'L', 'F', 924 ELFCLASS32, ELFDATA2MSB, EV_CURRENT }, 925 .e_type = ET_EXEC, /* yeah right */ 926 .e_machine = EM_PPC, 927 .e_version = EV_CURRENT, 928 .e_phoff = offsetof(struct fake_elf, phdr), 929 .e_phentsize = sizeof(Elf32_Phdr), 930 .e_phnum = 2 931 }, 932 .phdr = { 933 [0] = { 934 .p_type = PT_NOTE, 935 .p_offset = offsetof(struct fake_elf, chrpnote), 936 .p_filesz = sizeof(struct chrpnote) 937 }, [1] = { 938 .p_type = PT_NOTE, 939 .p_offset = offsetof(struct fake_elf, rpanote), 940 .p_filesz = sizeof(struct rpanote) 941 } 942 }, 943 .chrpnote = { 944 
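		/*
		 * This PT_NOTE payload mirrors option vector 2 above; firmware
		 * that lacks ibm,client-architecture-support reads it via the
		 * "process-elf-header" fallback in prom_send_capabilities().
		 */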
.namesz = sizeof("PowerPC"), 945 .descsz = sizeof(struct chrpdesc), 946 .type = 0x1275, 947 .name = "PowerPC", 948 .chrpdesc = { 949 .real_mode = ~0U, /* ~0 means "don't care" */ 950 .real_base = ~0U, 951 .real_size = ~0U, 952 .virt_base = ~0U, 953 .virt_size = ~0U, 954 .load_base = ~0U 955 }, 956 }, 957 .rpanote = { 958 .namesz = sizeof("IBM,RPA-Client-Config"), 959 .descsz = sizeof(struct rpadesc), 960 .type = 0x12759999, 961 .name = "IBM,RPA-Client-Config", 962 .rpadesc = { 963 .lpar_affinity = 0, 964 .min_rmo_size = 64, /* in megabytes */ 965 .min_rmo_percent = 0, 966 .max_pft_size = 48, /* 2^48 bytes max PFT size */ 967 .splpar = 1, 968 .min_load = ~0U, 969 .new_mem_def = 0 970 } 971 } 972 }; 973 #endif /* __BIG_ENDIAN__ */ 974 975 static int __init prom_count_smt_threads(void) 976 { 977 phandle node; 978 char type[64]; 979 unsigned int plen; 980 981 /* Pick up th first CPU node we can find */ 982 for (node = 0; prom_next_node(&node); ) { 983 type[0] = 0; 984 prom_getprop(node, "device_type", type, sizeof(type)); 985 986 if (strcmp(type, "cpu")) 987 continue; 988 /* 989 * There is an entry for each smt thread, each entry being 990 * 4 bytes long. All cpus should have the same number of 991 * smt threads, so return after finding the first. 992 */ 993 plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s"); 994 if (plen == PROM_ERROR) 995 break; 996 plen >>= 2; 997 prom_debug("Found %lu smt threads per core\n", (unsigned long)plen); 998 999 /* Sanity check */ 1000 if (plen < 1 || plen > 64) { 1001 prom_printf("Threads per core %lu out of bounds, assuming 1\n", 1002 (unsigned long)plen); 1003 return 1; 1004 } 1005 return plen; 1006 } 1007 prom_debug("No threads found, assuming 1 per core\n"); 1008 1009 return 1; 1010 1011 } 1012 1013 static void __init prom_parse_mmu_model(u8 val, 1014 struct platform_support *support) 1015 { 1016 switch (val) { 1017 case OV5_FEAT(OV5_MMU_DYNAMIC): 1018 case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */ 1019 prom_debug("MMU - either supported\n"); 1020 support->radix_mmu = !prom_radix_disable; 1021 support->hash_mmu = true; 1022 break; 1023 case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */ 1024 prom_debug("MMU - radix only\n"); 1025 if (prom_radix_disable) { 1026 /* 1027 * If we __have__ to do radix, we're better off ignoring 1028 * the command line rather than not booting. 
1029 */ 1030 prom_printf("WARNING: Ignoring cmdline option disable_radix\n"); 1031 } 1032 support->radix_mmu = true; 1033 break; 1034 case OV5_FEAT(OV5_MMU_HASH): 1035 prom_debug("MMU - hash only\n"); 1036 support->hash_mmu = true; 1037 break; 1038 default: 1039 prom_debug("Unknown mmu support option: 0x%x\n", val); 1040 break; 1041 } 1042 } 1043 1044 static void __init prom_parse_platform_support(u8 index, u8 val, 1045 struct platform_support *support) 1046 { 1047 switch (index) { 1048 case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */ 1049 prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support); 1050 break; 1051 case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */ 1052 if (val & OV5_FEAT(OV5_RADIX_GTSE)) { 1053 prom_debug("Radix - GTSE supported\n"); 1054 support->radix_gtse = true; 1055 } 1056 break; 1057 } 1058 } 1059 1060 static void __init prom_check_platform_support(void) 1061 { 1062 struct platform_support supported = { 1063 .hash_mmu = false, 1064 .radix_mmu = false, 1065 .radix_gtse = false 1066 }; 1067 int prop_len = prom_getproplen(prom.chosen, 1068 "ibm,arch-vec-5-platform-support"); 1069 if (prop_len > 1) { 1070 int i; 1071 u8 vec[prop_len]; 1072 prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n", 1073 prop_len); 1074 prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", 1075 &vec, sizeof(vec)); 1076 for (i = 0; i < prop_len; i += 2) { 1077 prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2 1078 , vec[i] 1079 , vec[i + 1]); 1080 prom_parse_platform_support(vec[i], vec[i + 1], 1081 &supported); 1082 } 1083 } 1084 1085 if (supported.radix_mmu && supported.radix_gtse) { 1086 /* Radix preferred - but we require GTSE for now */ 1087 prom_debug("Asking for radix with GTSE\n"); 1088 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX); 1089 ibm_architecture_vec.vec5.radix_ext = OV5_FEAT(OV5_RADIX_GTSE); 1090 } else if (supported.hash_mmu) { 1091 /* Default to hash mmu (if we can) */ 1092 prom_debug("Asking for hash\n"); 1093 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH); 1094 } else { 1095 /* We're probably on a legacy hypervisor */ 1096 prom_debug("Assuming legacy hash support\n"); 1097 } 1098 } 1099 1100 static void __init prom_send_capabilities(void) 1101 { 1102 ihandle root; 1103 prom_arg_t ret; 1104 u32 cores; 1105 1106 /* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */ 1107 prom_check_platform_support(); 1108 1109 root = call_prom("open", 1, 1, ADDR("/")); 1110 if (root != 0) { 1111 /* We need to tell the FW about the number of cores we support. 1112 * 1113 * To do that, we count the number of threads on the first core 1114 * (we assume this is the same for all cores) and use it to 1115 * divide NR_CPUS. 1116 */ 1117 1118 cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads()); 1119 prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n", 1120 cores, NR_CPUS); 1121 1122 ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores); 1123 1124 /* try calling the ibm,client-architecture-support method */ 1125 prom_printf("Calling ibm,client-architecture-support..."); 1126 if (call_prom_ret("call-method", 3, 2, &ret, 1127 ADDR("ibm,client-architecture-support"), 1128 root, 1129 ADDR(&ibm_architecture_vec)) == 0) { 1130 /* the call exists... 
*/ 1131 if (ret) 1132 prom_printf("\nWARNING: ibm,client-architecture" 1133 "-support call FAILED!\n"); 1134 call_prom("close", 1, 0, root); 1135 prom_printf(" done\n"); 1136 return; 1137 } 1138 call_prom("close", 1, 0, root); 1139 prom_printf(" not implemented\n"); 1140 } 1141 1142 #ifdef __BIG_ENDIAN__ 1143 { 1144 ihandle elfloader; 1145 1146 /* no ibm,client-architecture-support call, try the old way */ 1147 elfloader = call_prom("open", 1, 1, 1148 ADDR("/packages/elf-loader")); 1149 if (elfloader == 0) { 1150 prom_printf("couldn't open /packages/elf-loader\n"); 1151 return; 1152 } 1153 call_prom("call-method", 3, 1, ADDR("process-elf-header"), 1154 elfloader, ADDR(&fake_elf)); 1155 call_prom("close", 1, 0, elfloader); 1156 } 1157 #endif /* __BIG_ENDIAN__ */ 1158 } 1159 #endif /* #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */ 1160 1161 /* 1162 * Memory allocation strategy... our layout is normally: 1163 * 1164 * at 14Mb or more we have vmlinux, then a gap and initrd. In some 1165 * rare cases, initrd might end up being before the kernel though. 1166 * We assume this won't override the final kernel at 0, we have no 1167 * provision to handle that in this version, but it should hopefully 1168 * never happen. 1169 * 1170 * alloc_top is set to the top of RMO, eventually shrink down if the 1171 * TCEs overlap 1172 * 1173 * alloc_bottom is set to the top of kernel/initrd 1174 * 1175 * from there, allocations are done this way : rtas is allocated 1176 * topmost, and the device-tree is allocated from the bottom. We try 1177 * to grow the device-tree allocation as we progress. If we can't, 1178 * then we fail, we don't currently have a facility to restart 1179 * elsewhere, but that shouldn't be necessary. 1180 * 1181 * Note that calls to reserve_mem have to be done explicitly, memory 1182 * allocated with either alloc_up or alloc_down isn't automatically 1183 * reserved. 1184 */ 1185 1186 1187 /* 1188 * Allocates memory in the RMO upward from the kernel/initrd 1189 * 1190 * When align is 0, this is a special case, it means to allocate in place 1191 * at the current location of alloc_bottom or fail (that is basically 1192 * extending the previous allocation). Used for the device-tree flattening 1193 */ 1194 static unsigned long __init alloc_up(unsigned long size, unsigned long align) 1195 { 1196 unsigned long base = alloc_bottom; 1197 unsigned long addr = 0; 1198 1199 if (align) 1200 base = _ALIGN_UP(base, align); 1201 prom_debug("alloc_up(%x, %x)\n", size, align); 1202 if (ram_top == 0) 1203 prom_panic("alloc_up() called with mem not initialized\n"); 1204 1205 if (align) 1206 base = _ALIGN_UP(alloc_bottom, align); 1207 else 1208 base = alloc_bottom; 1209 1210 for(; (base + size) <= alloc_top; 1211 base = _ALIGN_UP(base + 0x100000, align)) { 1212 prom_debug(" trying: 0x%x\n\r", base); 1213 addr = (unsigned long)prom_claim(base, size, 0); 1214 if (addr != PROM_ERROR && addr != 0) 1215 break; 1216 addr = 0; 1217 if (align == 0) 1218 break; 1219 } 1220 if (addr == 0) 1221 return 0; 1222 alloc_bottom = addr + size; 1223 1224 prom_debug(" -> %x\n", addr); 1225 prom_debug(" alloc_bottom : %x\n", alloc_bottom); 1226 prom_debug(" alloc_top : %x\n", alloc_top); 1227 prom_debug(" alloc_top_hi : %x\n", alloc_top_high); 1228 prom_debug(" rmo_top : %x\n", rmo_top); 1229 prom_debug(" ram_top : %x\n", ram_top); 1230 1231 return addr; 1232 } 1233 1234 /* 1235 * Allocates memory downward, either from top of RMO, or if highmem 1236 * is set, from the top of RAM. 
Note that this one doesn't handle 1237 * failures. It does claim memory if highmem is not set. 1238 */ 1239 static unsigned long __init alloc_down(unsigned long size, unsigned long align, 1240 int highmem) 1241 { 1242 unsigned long base, addr = 0; 1243 1244 prom_debug("alloc_down(%x, %x, %s)\n", size, align, 1245 highmem ? "(high)" : "(low)"); 1246 if (ram_top == 0) 1247 prom_panic("alloc_down() called with mem not initialized\n"); 1248 1249 if (highmem) { 1250 /* Carve out storage for the TCE table. */ 1251 addr = _ALIGN_DOWN(alloc_top_high - size, align); 1252 if (addr <= alloc_bottom) 1253 return 0; 1254 /* Will we bump into the RMO ? If yes, check out that we 1255 * didn't overlap existing allocations there, if we did, 1256 * we are dead, we must be the first in town ! 1257 */ 1258 if (addr < rmo_top) { 1259 /* Good, we are first */ 1260 if (alloc_top == rmo_top) 1261 alloc_top = rmo_top = addr; 1262 else 1263 return 0; 1264 } 1265 alloc_top_high = addr; 1266 goto bail; 1267 } 1268 1269 base = _ALIGN_DOWN(alloc_top - size, align); 1270 for (; base > alloc_bottom; 1271 base = _ALIGN_DOWN(base - 0x100000, align)) { 1272 prom_debug(" trying: 0x%x\n\r", base); 1273 addr = (unsigned long)prom_claim(base, size, 0); 1274 if (addr != PROM_ERROR && addr != 0) 1275 break; 1276 addr = 0; 1277 } 1278 if (addr == 0) 1279 return 0; 1280 alloc_top = addr; 1281 1282 bail: 1283 prom_debug(" -> %x\n", addr); 1284 prom_debug(" alloc_bottom : %x\n", alloc_bottom); 1285 prom_debug(" alloc_top : %x\n", alloc_top); 1286 prom_debug(" alloc_top_hi : %x\n", alloc_top_high); 1287 prom_debug(" rmo_top : %x\n", rmo_top); 1288 prom_debug(" ram_top : %x\n", ram_top); 1289 1290 return addr; 1291 } 1292 1293 /* 1294 * Parse a "reg" cell 1295 */ 1296 static unsigned long __init prom_next_cell(int s, cell_t **cellp) 1297 { 1298 cell_t *p = *cellp; 1299 unsigned long r = 0; 1300 1301 /* Ignore more than 2 cells */ 1302 while (s > sizeof(unsigned long) / 4) { 1303 p++; 1304 s--; 1305 } 1306 r = be32_to_cpu(*p++); 1307 #ifdef CONFIG_PPC64 1308 if (s > 1) { 1309 r <<= 32; 1310 r |= be32_to_cpu(*(p++)); 1311 } 1312 #endif 1313 *cellp = p; 1314 return r; 1315 } 1316 1317 /* 1318 * Very dumb function for adding to the memory reserve list, but 1319 * we don't need anything smarter at this point 1320 * 1321 * XXX Eventually check for collisions. They should NEVER happen. 1322 * If problems seem to show up, it would be a good start to track 1323 * them down. 
1324 */ 1325 static void __init reserve_mem(u64 base, u64 size) 1326 { 1327 u64 top = base + size; 1328 unsigned long cnt = mem_reserve_cnt; 1329 1330 if (size == 0) 1331 return; 1332 1333 /* We need to always keep one empty entry so that we 1334 * have our terminator with "size" set to 0 since we are 1335 * dumb and just copy this entire array to the boot params 1336 */ 1337 base = _ALIGN_DOWN(base, PAGE_SIZE); 1338 top = _ALIGN_UP(top, PAGE_SIZE); 1339 size = top - base; 1340 1341 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1)) 1342 prom_panic("Memory reserve map exhausted !\n"); 1343 mem_reserve_map[cnt].base = cpu_to_be64(base); 1344 mem_reserve_map[cnt].size = cpu_to_be64(size); 1345 mem_reserve_cnt = cnt + 1; 1346 } 1347 1348 /* 1349 * Initialize memory allocation mechanism, parse "memory" nodes and 1350 * obtain that way the top of memory and RMO to setup out local allocator 1351 */ 1352 static void __init prom_init_mem(void) 1353 { 1354 phandle node; 1355 char *path, type[64]; 1356 unsigned int plen; 1357 cell_t *p, *endp; 1358 __be32 val; 1359 u32 rac, rsc; 1360 1361 /* 1362 * We iterate the memory nodes to find 1363 * 1) top of RMO (first node) 1364 * 2) top of memory 1365 */ 1366 val = cpu_to_be32(2); 1367 prom_getprop(prom.root, "#address-cells", &val, sizeof(val)); 1368 rac = be32_to_cpu(val); 1369 val = cpu_to_be32(1); 1370 prom_getprop(prom.root, "#size-cells", &val, sizeof(rsc)); 1371 rsc = be32_to_cpu(val); 1372 prom_debug("root_addr_cells: %x\n", rac); 1373 prom_debug("root_size_cells: %x\n", rsc); 1374 1375 prom_debug("scanning memory:\n"); 1376 path = prom_scratch; 1377 1378 for (node = 0; prom_next_node(&node); ) { 1379 type[0] = 0; 1380 prom_getprop(node, "device_type", type, sizeof(type)); 1381 1382 if (type[0] == 0) { 1383 /* 1384 * CHRP Longtrail machines have no device_type 1385 * on the memory node, so check the name instead... 1386 */ 1387 prom_getprop(node, "name", type, sizeof(type)); 1388 } 1389 if (strcmp(type, "memory")) 1390 continue; 1391 1392 plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf)); 1393 if (plen > sizeof(regbuf)) { 1394 prom_printf("memory node too large for buffer !\n"); 1395 plen = sizeof(regbuf); 1396 } 1397 p = regbuf; 1398 endp = p + (plen / sizeof(cell_t)); 1399 1400 #ifdef DEBUG_PROM 1401 memset(path, 0, PROM_SCRATCH_SIZE); 1402 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1); 1403 prom_debug(" node %s :\n", path); 1404 #endif /* DEBUG_PROM */ 1405 1406 while ((endp - p) >= (rac + rsc)) { 1407 unsigned long base, size; 1408 1409 base = prom_next_cell(rac, &p); 1410 size = prom_next_cell(rsc, &p); 1411 1412 if (size == 0) 1413 continue; 1414 prom_debug(" %x %x\n", base, size); 1415 if (base == 0 && (of_platform & PLATFORM_LPAR)) 1416 rmo_top = size; 1417 if ((base + size) > ram_top) 1418 ram_top = base + size; 1419 } 1420 } 1421 1422 alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000); 1423 1424 /* 1425 * If prom_memory_limit is set we reduce the upper limits *except* for 1426 * alloc_top_high. This must be the real top of RAM so we can put 1427 * TCE's up there. 
1428 */ 1429 1430 alloc_top_high = ram_top; 1431 1432 if (prom_memory_limit) { 1433 if (prom_memory_limit <= alloc_bottom) { 1434 prom_printf("Ignoring mem=%x <= alloc_bottom.\n", 1435 prom_memory_limit); 1436 prom_memory_limit = 0; 1437 } else if (prom_memory_limit >= ram_top) { 1438 prom_printf("Ignoring mem=%x >= ram_top.\n", 1439 prom_memory_limit); 1440 prom_memory_limit = 0; 1441 } else { 1442 ram_top = prom_memory_limit; 1443 rmo_top = min(rmo_top, prom_memory_limit); 1444 } 1445 } 1446 1447 /* 1448 * Setup our top alloc point, that is top of RMO or top of 1449 * segment 0 when running non-LPAR. 1450 * Some RS64 machines have buggy firmware where claims up at 1451 * 1GB fail. Cap at 768MB as a workaround. 1452 * Since 768MB is plenty of room, and we need to cap to something 1453 * reasonable on 32-bit, cap at 768MB on all machines. 1454 */ 1455 if (!rmo_top) 1456 rmo_top = ram_top; 1457 rmo_top = min(0x30000000ul, rmo_top); 1458 alloc_top = rmo_top; 1459 alloc_top_high = ram_top; 1460 1461 /* 1462 * Check if we have an initrd after the kernel but still inside 1463 * the RMO. If we do move our bottom point to after it. 1464 */ 1465 if (prom_initrd_start && 1466 prom_initrd_start < rmo_top && 1467 prom_initrd_end > alloc_bottom) 1468 alloc_bottom = PAGE_ALIGN(prom_initrd_end); 1469 1470 prom_printf("memory layout at init:\n"); 1471 prom_printf(" memory_limit : %x (16 MB aligned)\n", prom_memory_limit); 1472 prom_printf(" alloc_bottom : %x\n", alloc_bottom); 1473 prom_printf(" alloc_top : %x\n", alloc_top); 1474 prom_printf(" alloc_top_hi : %x\n", alloc_top_high); 1475 prom_printf(" rmo_top : %x\n", rmo_top); 1476 prom_printf(" ram_top : %x\n", ram_top); 1477 } 1478 1479 static void __init prom_close_stdin(void) 1480 { 1481 __be32 val; 1482 ihandle stdin; 1483 1484 if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) { 1485 stdin = be32_to_cpu(val); 1486 call_prom("close", 1, 0, stdin); 1487 } 1488 } 1489 1490 #ifdef CONFIG_PPC_POWERNV 1491 1492 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL 1493 static u64 __initdata prom_opal_base; 1494 static u64 __initdata prom_opal_entry; 1495 #endif 1496 1497 /* 1498 * Allocate room for and instantiate OPAL 1499 */ 1500 static void __init prom_instantiate_opal(void) 1501 { 1502 phandle opal_node; 1503 ihandle opal_inst; 1504 u64 base, entry; 1505 u64 size = 0, align = 0x10000; 1506 __be64 val64; 1507 u32 rets[2]; 1508 1509 prom_debug("prom_instantiate_opal: start...\n"); 1510 1511 opal_node = call_prom("finddevice", 1, 1, ADDR("/ibm,opal")); 1512 prom_debug("opal_node: %x\n", opal_node); 1513 if (!PHANDLE_VALID(opal_node)) 1514 return; 1515 1516 val64 = 0; 1517 prom_getprop(opal_node, "opal-runtime-size", &val64, sizeof(val64)); 1518 size = be64_to_cpu(val64); 1519 if (size == 0) 1520 return; 1521 val64 = 0; 1522 prom_getprop(opal_node, "opal-runtime-alignment", &val64,sizeof(val64)); 1523 align = be64_to_cpu(val64); 1524 1525 base = alloc_down(size, align, 0); 1526 if (base == 0) { 1527 prom_printf("OPAL allocation failed !\n"); 1528 return; 1529 } 1530 1531 opal_inst = call_prom("open", 1, 1, ADDR("/ibm,opal")); 1532 if (!IHANDLE_VALID(opal_inst)) { 1533 prom_printf("opening opal package failed (%x)\n", opal_inst); 1534 return; 1535 } 1536 1537 prom_printf("instantiating opal at 0x%x...", base); 1538 1539 if (call_prom_ret("call-method", 4, 3, rets, 1540 ADDR("load-opal-runtime"), 1541 opal_inst, 1542 base >> 32, base & 0xffffffff) != 0 1543 || (rets[0] == 0 && rets[1] == 0)) { 1544 prom_printf(" failed\n"); 1545 return; 1546 } 1547 entry = 
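	/* "load-opal-runtime" returns the 64-bit entry point as two 32-bit
	 * cells, reassembled here (rets[0] = high word, rets[1] = low word). */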
(((u64)rets[0]) << 32) | rets[1]; 1548 1549 prom_printf(" done\n"); 1550 1551 reserve_mem(base, size); 1552 1553 prom_debug("opal base = 0x%x\n", base); 1554 prom_debug("opal align = 0x%x\n", align); 1555 prom_debug("opal entry = 0x%x\n", entry); 1556 prom_debug("opal size = 0x%x\n", (long)size); 1557 1558 prom_setprop(opal_node, "/ibm,opal", "opal-base-address", 1559 &base, sizeof(base)); 1560 prom_setprop(opal_node, "/ibm,opal", "opal-entry-address", 1561 &entry, sizeof(entry)); 1562 1563 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL 1564 prom_opal_base = base; 1565 prom_opal_entry = entry; 1566 #endif 1567 prom_debug("prom_instantiate_opal: end...\n"); 1568 } 1569 1570 #endif /* CONFIG_PPC_POWERNV */ 1571 1572 /* 1573 * Allocate room for and instantiate RTAS 1574 */ 1575 static void __init prom_instantiate_rtas(void) 1576 { 1577 phandle rtas_node; 1578 ihandle rtas_inst; 1579 u32 base, entry = 0; 1580 __be32 val; 1581 u32 size = 0; 1582 1583 prom_debug("prom_instantiate_rtas: start...\n"); 1584 1585 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas")); 1586 prom_debug("rtas_node: %x\n", rtas_node); 1587 if (!PHANDLE_VALID(rtas_node)) 1588 return; 1589 1590 val = 0; 1591 prom_getprop(rtas_node, "rtas-size", &val, sizeof(size)); 1592 size = be32_to_cpu(val); 1593 if (size == 0) 1594 return; 1595 1596 base = alloc_down(size, PAGE_SIZE, 0); 1597 if (base == 0) 1598 prom_panic("Could not allocate memory for RTAS\n"); 1599 1600 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas")); 1601 if (!IHANDLE_VALID(rtas_inst)) { 1602 prom_printf("opening rtas package failed (%x)\n", rtas_inst); 1603 return; 1604 } 1605 1606 prom_printf("instantiating rtas at 0x%x...", base); 1607 1608 if (call_prom_ret("call-method", 3, 2, &entry, 1609 ADDR("instantiate-rtas"), 1610 rtas_inst, base) != 0 1611 || entry == 0) { 1612 prom_printf(" failed\n"); 1613 return; 1614 } 1615 prom_printf(" done\n"); 1616 1617 reserve_mem(base, size); 1618 1619 val = cpu_to_be32(base); 1620 prom_setprop(rtas_node, "/rtas", "linux,rtas-base", 1621 &val, sizeof(val)); 1622 val = cpu_to_be32(entry); 1623 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry", 1624 &val, sizeof(val)); 1625 1626 /* Check if it supports "query-cpu-stopped-state" */ 1627 if (prom_getprop(rtas_node, "query-cpu-stopped-state", 1628 &val, sizeof(val)) != PROM_ERROR) 1629 rtas_has_query_cpu_stopped = true; 1630 1631 prom_debug("rtas base = 0x%x\n", base); 1632 prom_debug("rtas entry = 0x%x\n", entry); 1633 prom_debug("rtas size = 0x%x\n", (long)size); 1634 1635 prom_debug("prom_instantiate_rtas: end...\n"); 1636 } 1637 1638 #ifdef CONFIG_PPC64 1639 /* 1640 * Allocate room for and instantiate Stored Measurement Log (SML) 1641 */ 1642 static void __init prom_instantiate_sml(void) 1643 { 1644 phandle ibmvtpm_node; 1645 ihandle ibmvtpm_inst; 1646 u32 entry = 0, size = 0, succ = 0; 1647 u64 base; 1648 __be32 val; 1649 1650 prom_debug("prom_instantiate_sml: start...\n"); 1651 1652 ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm")); 1653 prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node); 1654 if (!PHANDLE_VALID(ibmvtpm_node)) 1655 return; 1656 1657 ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm")); 1658 if (!IHANDLE_VALID(ibmvtpm_inst)) { 1659 prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst); 1660 return; 1661 } 1662 1663 if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported", 1664 &val, sizeof(val)) != PROM_ERROR) { 1665 if (call_prom_ret("call-method", 2, 2, &succ, 1666 ADDR("reformat-sml-to-efi-alignment"), 1667 ibmvtpm_inst) 
!= 0 || succ == 0) { 1668 prom_printf("Reformat SML to EFI alignment failed\n"); 1669 return; 1670 } 1671 1672 if (call_prom_ret("call-method", 2, 2, &size, 1673 ADDR("sml-get-allocated-size"), 1674 ibmvtpm_inst) != 0 || size == 0) { 1675 prom_printf("SML get allocated size failed\n"); 1676 return; 1677 } 1678 } else { 1679 if (call_prom_ret("call-method", 2, 2, &size, 1680 ADDR("sml-get-handover-size"), 1681 ibmvtpm_inst) != 0 || size == 0) { 1682 prom_printf("SML get handover size failed\n"); 1683 return; 1684 } 1685 } 1686 1687 base = alloc_down(size, PAGE_SIZE, 0); 1688 if (base == 0) 1689 prom_panic("Could not allocate memory for sml\n"); 1690 1691 prom_printf("instantiating sml at 0x%x...", base); 1692 1693 memset((void *)base, 0, size); 1694 1695 if (call_prom_ret("call-method", 4, 2, &entry, 1696 ADDR("sml-handover"), 1697 ibmvtpm_inst, size, base) != 0 || entry == 0) { 1698 prom_printf("SML handover failed\n"); 1699 return; 1700 } 1701 prom_printf(" done\n"); 1702 1703 reserve_mem(base, size); 1704 1705 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base", 1706 &base, sizeof(base)); 1707 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size", 1708 &size, sizeof(size)); 1709 1710 prom_debug("sml base = 0x%x\n", base); 1711 prom_debug("sml size = 0x%x\n", (long)size); 1712 1713 prom_debug("prom_instantiate_sml: end...\n"); 1714 } 1715 1716 /* 1717 * Allocate room for and initialize TCE tables 1718 */ 1719 #ifdef __BIG_ENDIAN__ 1720 static void __init prom_initialize_tce_table(void) 1721 { 1722 phandle node; 1723 ihandle phb_node; 1724 char compatible[64], type[64], model[64]; 1725 char *path = prom_scratch; 1726 u64 base, align; 1727 u32 minalign, minsize; 1728 u64 tce_entry, *tce_entryp; 1729 u64 local_alloc_top, local_alloc_bottom; 1730 u64 i; 1731 1732 if (prom_iommu_off) 1733 return; 1734 1735 prom_debug("starting prom_initialize_tce_table\n"); 1736 1737 /* Cache current top of allocs so we reserve a single block */ 1738 local_alloc_top = alloc_top_high; 1739 local_alloc_bottom = local_alloc_top; 1740 1741 /* Search all nodes looking for PHBs. */ 1742 for (node = 0; prom_next_node(&node); ) { 1743 compatible[0] = 0; 1744 type[0] = 0; 1745 model[0] = 0; 1746 prom_getprop(node, "compatible", 1747 compatible, sizeof(compatible)); 1748 prom_getprop(node, "device_type", type, sizeof(type)); 1749 prom_getprop(node, "model", model, sizeof(model)); 1750 1751 if ((type[0] == 0) || (strstr(type, "pci") == NULL)) 1752 continue; 1753 1754 /* Keep the old logic intact to avoid regression. */ 1755 if (compatible[0] != 0) { 1756 if ((strstr(compatible, "python") == NULL) && 1757 (strstr(compatible, "Speedwagon") == NULL) && 1758 (strstr(compatible, "Winnipeg") == NULL)) 1759 continue; 1760 } else if (model[0] != 0) { 1761 if ((strstr(model, "ython") == NULL) && 1762 (strstr(model, "peedwagon") == NULL) && 1763 (strstr(model, "innipeg") == NULL)) 1764 continue; 1765 } 1766 1767 if (prom_getprop(node, "tce-table-minalign", &minalign, 1768 sizeof(minalign)) == PROM_ERROR) 1769 minalign = 0; 1770 if (prom_getprop(node, "tce-table-minsize", &minsize, 1771 sizeof(minsize)) == PROM_ERROR) 1772 minsize = 4UL << 20; 1773 1774 /* 1775 * Even though we read what OF wants, we just set the table 1776 * size to 4 MB. This is enough to map 2GB of PCI DMA space. 1777 * By doing this, we avoid the pitfalls of trying to DMA to 1778 * MMIO space and the DMA alias hole. 1779 * 1780 * On POWER4, firmware sets the TCE region by assuming 1781 * each TCE table is 8MB. 
Using this memory for anything 1782 * else will impact performance, so we always allocate 8MB. 1783 * Anton 1784 */ 1785 if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p)) 1786 minsize = 8UL << 20; 1787 else 1788 minsize = 4UL << 20; 1789 1790 /* Align to the greater of the align or size */ 1791 align = max(minalign, minsize); 1792 base = alloc_down(minsize, align, 1); 1793 if (base == 0) 1794 prom_panic("ERROR, cannot find space for TCE table.\n"); 1795 if (base < local_alloc_bottom) 1796 local_alloc_bottom = base; 1797 1798 /* It seems OF doesn't null-terminate the path :-( */ 1799 memset(path, 0, PROM_SCRATCH_SIZE); 1800 /* Call OF to setup the TCE hardware */ 1801 if (call_prom("package-to-path", 3, 1, node, 1802 path, PROM_SCRATCH_SIZE-1) == PROM_ERROR) { 1803 prom_printf("package-to-path failed\n"); 1804 } 1805 1806 /* Save away the TCE table attributes for later use. */ 1807 prom_setprop(node, path, "linux,tce-base", &base, sizeof(base)); 1808 prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize)); 1809 1810 prom_debug("TCE table: %s\n", path); 1811 prom_debug("\tnode = 0x%x\n", node); 1812 prom_debug("\tbase = 0x%x\n", base); 1813 prom_debug("\tsize = 0x%x\n", minsize); 1814 1815 /* Initialize the table to have a one-to-one mapping 1816 * over the allocated size. 1817 */ 1818 tce_entryp = (u64 *)base; 1819 for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) { 1820 tce_entry = (i << PAGE_SHIFT); 1821 tce_entry |= 0x3; 1822 *tce_entryp = tce_entry; 1823 } 1824 1825 prom_printf("opening PHB %s", path); 1826 phb_node = call_prom("open", 1, 1, path); 1827 if (phb_node == 0) 1828 prom_printf("... failed\n"); 1829 else 1830 prom_printf("... done\n"); 1831 1832 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"), 1833 phb_node, -1, minsize, 1834 (u32) base, (u32) (base >> 32)); 1835 call_prom("close", 1, 0, phb_node); 1836 } 1837 1838 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom); 1839 1840 /* These are only really needed if there is a memory limit in 1841 * effect, but we don't know so export them always. */ 1842 prom_tce_alloc_start = local_alloc_bottom; 1843 prom_tce_alloc_end = local_alloc_top; 1844 1845 /* Flag the first invalid entry */ 1846 prom_debug("ending prom_initialize_tce_table\n"); 1847 } 1848 #endif /* __BIG_ENDIAN__ */ 1849 #endif /* CONFIG_PPC64 */ 1850 1851 /* 1852 * With CHRP SMP we need to use the OF to start the other processors. 1853 * We can't wait until smp_boot_cpus (the OF is trashed by then) 1854 * so we have to put the processors into a holding pattern controlled 1855 * by the kernel (not OF) before we destroy the OF. 1856 * 1857 * This uses a chunk of low memory, puts some holding pattern 1858 * code there and sends the other processors off to there until 1859 * smp_boot_cpus tells them to do something. The holding pattern 1860 * checks that address until its cpu # is there, when it is that 1861 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care 1862 * of setting those values. 1863 * 1864 * We also use physical address 0x4 here to tell when a cpu 1865 * is in its holding pattern code. 
1866 * 1867 * -- Cort 1868 */ 1869 /* 1870 * We want to reference the copy of __secondary_hold_* in the 1871 * 0 - 0x100 address range 1872 */ 1873 #define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff) 1874 1875 static void __init prom_hold_cpus(void) 1876 { 1877 unsigned long i; 1878 phandle node; 1879 char type[64]; 1880 unsigned long *spinloop 1881 = (void *) LOW_ADDR(__secondary_hold_spinloop); 1882 unsigned long *acknowledge 1883 = (void *) LOW_ADDR(__secondary_hold_acknowledge); 1884 unsigned long secondary_hold = LOW_ADDR(__secondary_hold); 1885 1886 /* 1887 * On pseries, if RTAS supports "query-cpu-stopped-state", 1888 * we skip this stage, the CPUs will be started by the 1889 * kernel using RTAS. 1890 */ 1891 if ((of_platform == PLATFORM_PSERIES || 1892 of_platform == PLATFORM_PSERIES_LPAR) && 1893 rtas_has_query_cpu_stopped) { 1894 prom_printf("prom_hold_cpus: skipped\n"); 1895 return; 1896 } 1897 1898 prom_debug("prom_hold_cpus: start...\n"); 1899 prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop); 1900 prom_debug(" 1) *spinloop = 0x%x\n", *spinloop); 1901 prom_debug(" 1) acknowledge = 0x%x\n", 1902 (unsigned long)acknowledge); 1903 prom_debug(" 1) *acknowledge = 0x%x\n", *acknowledge); 1904 prom_debug(" 1) secondary_hold = 0x%x\n", secondary_hold); 1905 1906 /* Set the common spinloop variable, so all of the secondary cpus 1907 * will block when they are awakened from their OF spinloop. 1908 * This must occur for both SMP and non SMP kernels, since OF will 1909 * be trashed when we move the kernel. 1910 */ 1911 *spinloop = 0; 1912 1913 /* look for cpus */ 1914 for (node = 0; prom_next_node(&node); ) { 1915 unsigned int cpu_no; 1916 __be32 reg; 1917 1918 type[0] = 0; 1919 prom_getprop(node, "device_type", type, sizeof(type)); 1920 if (strcmp(type, "cpu") != 0) 1921 continue; 1922 1923 /* Skip non-configured cpus. */ 1924 if (prom_getprop(node, "status", type, sizeof(type)) > 0) 1925 if (strcmp(type, "okay") != 0) 1926 continue; 1927 1928 reg = cpu_to_be32(-1); /* make sparse happy */ 1929 prom_getprop(node, "reg", ®, sizeof(reg)); 1930 cpu_no = be32_to_cpu(reg); 1931 1932 prom_debug("cpu hw idx = %lu\n", cpu_no); 1933 1934 /* Init the acknowledge var which will be reset by 1935 * the secondary cpu when it awakens from its OF 1936 * spinloop. 1937 */ 1938 *acknowledge = (unsigned long)-1; 1939 1940 if (cpu_no != prom.cpu) { 1941 /* Primary Thread of non-boot cpu or any thread */ 1942 prom_printf("starting cpu hw idx %lu... 
", cpu_no); 1943 call_prom("start-cpu", 3, 0, node, 1944 secondary_hold, cpu_no); 1945 1946 for (i = 0; (i < 100000000) && 1947 (*acknowledge == ((unsigned long)-1)); i++ ) 1948 mb(); 1949 1950 if (*acknowledge == cpu_no) 1951 prom_printf("done\n"); 1952 else 1953 prom_printf("failed: %x\n", *acknowledge); 1954 } 1955 #ifdef CONFIG_SMP 1956 else 1957 prom_printf("boot cpu hw idx %lu\n", cpu_no); 1958 #endif /* CONFIG_SMP */ 1959 } 1960 1961 prom_debug("prom_hold_cpus: end...\n"); 1962 } 1963 1964 1965 static void __init prom_init_client_services(unsigned long pp) 1966 { 1967 /* Get a handle to the prom entry point before anything else */ 1968 prom_entry = pp; 1969 1970 /* get a handle for the stdout device */ 1971 prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen")); 1972 if (!PHANDLE_VALID(prom.chosen)) 1973 prom_panic("cannot find chosen"); /* msg won't be printed :( */ 1974 1975 /* get device tree root */ 1976 prom.root = call_prom("finddevice", 1, 1, ADDR("/")); 1977 if (!PHANDLE_VALID(prom.root)) 1978 prom_panic("cannot find device tree root"); /* msg won't be printed :( */ 1979 1980 prom.mmumap = 0; 1981 } 1982 1983 #ifdef CONFIG_PPC32 1984 /* 1985 * For really old powermacs, we need to map things we claim. 1986 * For that, we need the ihandle of the mmu. 1987 * Also, on the longtrail, we need to work around other bugs. 1988 */ 1989 static void __init prom_find_mmu(void) 1990 { 1991 phandle oprom; 1992 char version[64]; 1993 1994 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom")); 1995 if (!PHANDLE_VALID(oprom)) 1996 return; 1997 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0) 1998 return; 1999 version[sizeof(version) - 1] = 0; 2000 /* XXX might need to add other versions here */ 2001 if (strcmp(version, "Open Firmware, 1.0.5") == 0) 2002 of_workarounds = OF_WA_CLAIM; 2003 else if (strncmp(version, "FirmWorks,3.", 12) == 0) { 2004 of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL; 2005 call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim"); 2006 } else 2007 return; 2008 prom.memory = call_prom("open", 1, 1, ADDR("/memory")); 2009 prom_getprop(prom.chosen, "mmu", &prom.mmumap, 2010 sizeof(prom.mmumap)); 2011 prom.mmumap = be32_to_cpu(prom.mmumap); 2012 if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap)) 2013 of_workarounds &= ~OF_WA_CLAIM; /* hmmm */ 2014 } 2015 #else 2016 #define prom_find_mmu() 2017 #endif 2018 2019 static void __init prom_init_stdout(void) 2020 { 2021 char *path = of_stdout_device; 2022 char type[16]; 2023 phandle stdout_node; 2024 __be32 val; 2025 2026 if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0) 2027 prom_panic("cannot find stdout"); 2028 2029 prom.stdout = be32_to_cpu(val); 2030 2031 /* Get the full OF pathname of the stdout device */ 2032 memset(path, 0, 256); 2033 call_prom("instance-to-path", 3, 1, prom.stdout, path, 255); 2034 prom_printf("OF stdout device is: %s\n", of_stdout_device); 2035 prom_setprop(prom.chosen, "/chosen", "linux,stdout-path", 2036 path, strlen(path) + 1); 2037 2038 /* instance-to-package fails on PA-Semi */ 2039 stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout); 2040 if (stdout_node != PROM_ERROR) { 2041 val = cpu_to_be32(stdout_node); 2042 prom_setprop(prom.chosen, "/chosen", "linux,stdout-package", 2043 &val, sizeof(val)); 2044 2045 /* If it's a display, note it */ 2046 memset(type, 0, sizeof(type)); 2047 prom_getprop(stdout_node, "device_type", type, sizeof(type)); 2048 if (strcmp(type, "display") == 0) 2049 prom_setprop(stdout_node, path, 
"linux,boot-display", NULL, 0); 2050 } 2051 } 2052 2053 static int __init prom_find_machine_type(void) 2054 { 2055 char compat[256]; 2056 int len, i = 0; 2057 #ifdef CONFIG_PPC64 2058 phandle rtas; 2059 int x; 2060 #endif 2061 2062 /* Look for a PowerMac or a Cell */ 2063 len = prom_getprop(prom.root, "compatible", 2064 compat, sizeof(compat)-1); 2065 if (len > 0) { 2066 compat[len] = 0; 2067 while (i < len) { 2068 char *p = &compat[i]; 2069 int sl = strlen(p); 2070 if (sl == 0) 2071 break; 2072 if (strstr(p, "Power Macintosh") || 2073 strstr(p, "MacRISC")) 2074 return PLATFORM_POWERMAC; 2075 #ifdef CONFIG_PPC64 2076 /* We must make sure we don't detect the IBM Cell 2077 * blades as pSeries due to some firmware issues, 2078 * so we do it here. 2079 */ 2080 if (strstr(p, "IBM,CBEA") || 2081 strstr(p, "IBM,CPBW-1.0")) 2082 return PLATFORM_GENERIC; 2083 #endif /* CONFIG_PPC64 */ 2084 i += sl + 1; 2085 } 2086 } 2087 #ifdef CONFIG_PPC64 2088 /* Try to detect OPAL */ 2089 if (PHANDLE_VALID(call_prom("finddevice", 1, 1, ADDR("/ibm,opal")))) 2090 return PLATFORM_OPAL; 2091 2092 /* Try to figure out if it's an IBM pSeries or any other 2093 * PAPR compliant platform. We assume it is if : 2094 * - /device_type is "chrp" (please, do NOT use that for future 2095 * non-IBM designs ! 2096 * - it has /rtas 2097 */ 2098 len = prom_getprop(prom.root, "device_type", 2099 compat, sizeof(compat)-1); 2100 if (len <= 0) 2101 return PLATFORM_GENERIC; 2102 if (strcmp(compat, "chrp")) 2103 return PLATFORM_GENERIC; 2104 2105 /* Default to pSeries. We need to know if we are running LPAR */ 2106 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas")); 2107 if (!PHANDLE_VALID(rtas)) 2108 return PLATFORM_GENERIC; 2109 x = prom_getproplen(rtas, "ibm,hypertas-functions"); 2110 if (x != PROM_ERROR) { 2111 prom_debug("Hypertas detected, assuming LPAR !\n"); 2112 return PLATFORM_PSERIES_LPAR; 2113 } 2114 return PLATFORM_PSERIES; 2115 #else 2116 return PLATFORM_GENERIC; 2117 #endif 2118 } 2119 2120 static int __init prom_set_color(ihandle ih, int i, int r, int g, int b) 2121 { 2122 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r); 2123 } 2124 2125 /* 2126 * If we have a display that we don't know how to drive, 2127 * we will want to try to execute OF's open method for it 2128 * later. However, OF will probably fall over if we do that 2129 * we've taken over the MMU. 2130 * So we check whether we will need to open the display, 2131 * and if so, open it now. 
static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
{
	return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
}

/*
 * If we have a display that we don't know how to drive,
 * we will want to try to execute OF's open method for it
 * later.  However, OF will probably fall over if we do that
 * after we've taken over the MMU.
 * So we check whether we will need to open the display,
 * and if so, open it now.
 */
static void __init prom_check_displays(void)
{
	char type[16], *path;
	phandle node;
	ihandle ih;
	int i;

	static unsigned char default_colors[] = {
		0x00, 0x00, 0x00,
		0x00, 0x00, 0xaa,
		0x00, 0xaa, 0x00,
		0x00, 0xaa, 0xaa,
		0xaa, 0x00, 0x00,
		0xaa, 0x00, 0xaa,
		0xaa, 0xaa, 0x00,
		0xaa, 0xaa, 0xaa,
		0x55, 0x55, 0x55,
		0x55, 0x55, 0xff,
		0x55, 0xff, 0x55,
		0x55, 0xff, 0xff,
		0xff, 0x55, 0x55,
		0xff, 0x55, 0xff,
		0xff, 0xff, 0x55,
		0xff, 0xff, 0xff
	};
	const unsigned char *clut;

	prom_debug("Looking for displays\n");
	for (node = 0; prom_next_node(&node); ) {
		memset(type, 0, sizeof(type));
		prom_getprop(node, "device_type", type, sizeof(type));
		if (strcmp(type, "display") != 0)
			continue;

		/* It seems OF doesn't null-terminate the path :-( */
		path = prom_scratch;
		memset(path, 0, PROM_SCRATCH_SIZE);

		/*
		 * leave some room at the end of the path for appending extra
		 * arguments
		 */
		if (call_prom("package-to-path", 3, 1, node, path,
			      PROM_SCRATCH_SIZE-10) == PROM_ERROR)
			continue;
		prom_printf("found display : %s, opening... ", path);

		ih = call_prom("open", 1, 1, path);
		if (ih == 0) {
			prom_printf("failed\n");
			continue;
		}

		/* Success */
		prom_printf("done\n");
		prom_setprop(node, path, "linux,opened", NULL, 0);

		/* Setup a usable color table when the appropriate
		 * method is available. Should update this to use set-colors */
		clut = default_colors;
		for (i = 0; i < 16; i++, clut += 3)
			if (prom_set_color(ih, i, clut[0], clut[1],
					   clut[2]) != 0)
				break;

#ifdef CONFIG_LOGO_LINUX_CLUT224
		clut = PTRRELOC(logo_linux_clut224.clut);
		for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3)
			if (prom_set_color(ih, i + 32, clut[0], clut[1],
					   clut[2]) != 0)
				break;
#endif /* CONFIG_LOGO_LINUX_CLUT224 */

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
		if (prom_getprop(node, "linux,boot-display", NULL, 0) !=
		    PROM_ERROR) {
			u32 width, height, pitch, addr;

			prom_printf("Setting btext !\n");
			prom_getprop(node, "width", &width, 4);
			prom_getprop(node, "height", &height, 4);
			prom_getprop(node, "linebytes", &pitch, 4);
			prom_getprop(node, "address", &addr, 4);
			prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
				    width, height, pitch, addr);
			btext_setup_display(width, height, 8, pitch, addr);
		}
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
	}
}
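/*
 * Descriptive note only: the "linux,opened" and "linux,boot-display"
 * markers set by prom_check_displays()/prom_init_stdout() carry no
 * behaviour of their own at this point; they are hints consumed by later
 * boot code (the early btext console above and, presumably, the Open
 * Firmware framebuffer driver) to pick the right display.
 */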
/* Return (relocated) pointer to this much memory: moves initrd if reqd. */
static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
			      unsigned long needed, unsigned long align)
{
	void *ret;

	*mem_start = _ALIGN(*mem_start, align);
	while ((*mem_start + needed) > *mem_end) {
		unsigned long room, chunk;

		prom_debug("Chunk exhausted, claiming more at %x...\n",
			   alloc_bottom);
		room = alloc_top - alloc_bottom;
		if (room > DEVTREE_CHUNK_SIZE)
			room = DEVTREE_CHUNK_SIZE;
		if (room < PAGE_SIZE)
			prom_panic("No memory for flatten_device_tree "
				   "(no room)\n");
		chunk = alloc_up(room, 0);
		if (chunk == 0)
			prom_panic("No memory for flatten_device_tree "
				   "(claim failed)\n");
		*mem_end = chunk + room;
	}

	ret = (void *)*mem_start;
	*mem_start += needed;

	return ret;
}

#define dt_push_token(token, mem_start, mem_end) do {			\
		void *room = make_room(mem_start, mem_end, 4, 4);	\
		*(__be32 *)room = cpu_to_be32(token);			\
	} while(0)

static unsigned long __init dt_find_string(char *str)
{
	char *s, *os;

	s = os = (char *)dt_string_start;
	s += 4;
	while (s < (char *)dt_string_end) {
		if (strcmp(s, str) == 0)
			return s - os;
		s += strlen(s) + 1;
	}
	return 0;
}

/*
 * The Open Firmware 1275 specification states properties must be 31 bytes or
 * less; however, not all firmware obeys this. Make it 64 bytes to be safe.
 */
#define MAX_PROPERTY_NAME 64

static void __init scan_dt_build_strings(phandle node,
					 unsigned long *mem_start,
					 unsigned long *mem_end)
{
	char *prev_name, *namep, *sstart;
	unsigned long soff;
	phandle child;

	sstart = (char *)dt_string_start;

	/* get and store all property names */
	prev_name = "";
	for (;;) {
		/* 64 is max len of name including nul. */
		namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
		if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
			/* No more nodes: unwind alloc */
			*mem_start = (unsigned long)namep;
			break;
		}

		/* skip "name" */
		if (strcmp(namep, "name") == 0) {
			*mem_start = (unsigned long)namep;
			prev_name = "name";
			continue;
		}
		/* get/create string entry */
		soff = dt_find_string(namep);
		if (soff != 0) {
			*mem_start = (unsigned long)namep;
			namep = sstart + soff;
		} else {
			/* Trim off some if we can */
			*mem_start = (unsigned long)namep + strlen(namep) + 1;
			dt_string_end = *mem_start;
		}
		prev_name = namep;
	}

	/* do all our children */
	child = call_prom("child", 1, 1, node);
	while (child != 0) {
		scan_dt_build_strings(child, mem_start, mem_end);
		child = call_prom("peer", 1, 1, child);
	}
}
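/*
 * Sketch of the structure block that scan_dt_build_struct() below emits
 * for one node (a summary only; the code below is authoritative):
 *
 *	OF_DT_BEGIN_NODE
 *	"unit-name\0"				(padded to a 4-byte boundary)
 *	OF_DT_PROP <len> <string-table offset>	(one triple per property)
 *	<property value, padded to 4 bytes>
 *	...child nodes, recursively...
 *	OF_DT_END_NODE
 *
 * Property names themselves are stored only once, in the string table
 * built by scan_dt_build_strings() above; the OF_DT_PROP entries reference
 * them by offset.
 */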
static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
					unsigned long *mem_end)
{
	phandle child;
	char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
	unsigned long soff;
	unsigned char *valp;
	static char pname[MAX_PROPERTY_NAME];
	int l, room, has_phandle = 0;

	dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);

	/* get the node's full name */
	namep = (char *)*mem_start;
	room = *mem_end - *mem_start;
	if (room > 255)
		room = 255;
	l = call_prom("package-to-path", 3, 1, node, namep, room);
	if (l >= 0) {
		/* Didn't fit?  Get more room. */
		if (l >= room) {
			if (l >= *mem_end - *mem_start)
				namep = make_room(mem_start, mem_end, l+1, 1);
			call_prom("package-to-path", 3, 1, node, namep, l);
		}
		namep[l] = '\0';

		/* Fixup an Apple bug where they have bogus \0 chars in the
		 * middle of the path in some properties, and extract
		 * the unit name (everything after the last '/').
		 */
		for (lp = p = namep, ep = namep + l; p < ep; p++) {
			if (*p == '/')
				lp = namep;
			else if (*p != 0)
				*lp++ = *p;
		}
		*lp = 0;
		*mem_start = _ALIGN((unsigned long)lp + 1, 4);
	}

	/* get it again for debugging */
	path = prom_scratch;
	memset(path, 0, PROM_SCRATCH_SIZE);
	call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);

	/* get and store all properties */
	prev_name = "";
	sstart = (char *)dt_string_start;
	for (;;) {
		if (call_prom("nextprop", 3, 1, node, prev_name,
			      pname) != 1)
			break;

		/* skip "name" */
		if (strcmp(pname, "name") == 0) {
			prev_name = "name";
			continue;
		}

		/* find string offset */
		soff = dt_find_string(pname);
		if (soff == 0) {
			prom_printf("WARNING: Can't find string index for"
				    " <%s>, node %s\n", pname, path);
			break;
		}
		prev_name = sstart + soff;

		/* get length */
		l = call_prom("getproplen", 2, 1, node, pname);

		/* sanity checks */
		if (l == PROM_ERROR)
			continue;

		/* push property head */
		dt_push_token(OF_DT_PROP, mem_start, mem_end);
		dt_push_token(l, mem_start, mem_end);
		dt_push_token(soff, mem_start, mem_end);

		/* push property content */
		valp = make_room(mem_start, mem_end, l, 4);
		call_prom("getprop", 4, 1, node, pname, valp, l);
		*mem_start = _ALIGN(*mem_start, 4);

		if (!strcmp(pname, "phandle"))
			has_phandle = 1;
	}

	/* Add a "linux,phandle" property if no "phandle" property already
	 * existed (can happen with OPAL)
	 */
	if (!has_phandle) {
		soff = dt_find_string("linux,phandle");
		if (soff == 0)
			prom_printf("WARNING: Can't find string index for"
				    " <linux,phandle>, node %s\n", path);
		else {
			dt_push_token(OF_DT_PROP, mem_start, mem_end);
			dt_push_token(4, mem_start, mem_end);
			dt_push_token(soff, mem_start, mem_end);
			valp = make_room(mem_start, mem_end, 4, 4);
			*(__be32 *)valp = cpu_to_be32(node);
		}
	}

	/* do all our children */
	child = call_prom("child", 1, 1, node);
	while (child != 0) {
		scan_dt_build_struct(child, mem_start, mem_end);
		child = call_prom("peer", 1, 1, child);
	}

	dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
}
static void __init flatten_device_tree(void)
{
	phandle root;
	unsigned long mem_start, mem_end, room;
	struct boot_param_header *hdr;
	char *namep;
	u64 *rsvmap;

	/*
	 * Check how much room we have between alloc top & bottom (+/- a
	 * few pages), crop to 1MB, as this is our "chunk" size
	 */
	room = alloc_top - alloc_bottom - 0x4000;
	if (room > DEVTREE_CHUNK_SIZE)
		room = DEVTREE_CHUNK_SIZE;
	prom_debug("starting device tree allocs at %x\n", alloc_bottom);

	/* Now try to claim that */
	mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
	if (mem_start == 0)
		prom_panic("Can't allocate initial device-tree chunk\n");
	mem_end = mem_start + room;

	/* Get root of tree */
	root = call_prom("peer", 1, 1, (phandle)0);
	if (root == (phandle)0)
		prom_panic("couldn't get device tree root\n");

	/* Build header and make room for mem rsv map */
	mem_start = _ALIGN(mem_start, 4);
	hdr = make_room(&mem_start, &mem_end,
			sizeof(struct boot_param_header), 4);
	dt_header_start = (unsigned long)hdr;
	rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);

	/* Start of strings */
	mem_start = PAGE_ALIGN(mem_start);
	dt_string_start = mem_start;
	mem_start += 4; /* hole */

	/* Add "linux,phandle" in there, we'll need it */
	namep = make_room(&mem_start, &mem_end, 16, 1);
	strcpy(namep, "linux,phandle");
	mem_start = (unsigned long)namep + strlen(namep) + 1;

	/* Build string array */
	prom_printf("Building dt strings...\n");
	scan_dt_build_strings(root, &mem_start, &mem_end);
	dt_string_end = mem_start;

	/* Build structure */
	mem_start = PAGE_ALIGN(mem_start);
	dt_struct_start = mem_start;
	prom_printf("Building dt structure...\n");
	scan_dt_build_struct(root, &mem_start, &mem_end);
	dt_push_token(OF_DT_END, &mem_start, &mem_end);
	dt_struct_end = PAGE_ALIGN(mem_start);

	/* Finish header */
	hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
	hdr->magic = cpu_to_be32(OF_DT_HEADER);
	hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start);
	hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start);
	hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start);
	hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start);
	hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start);
	hdr->version = cpu_to_be32(OF_DT_VERSION);
	/* Version 16 is not backward compatible */
	hdr->last_comp_version = cpu_to_be32(0x10);

	/* Copy the reserve map in */
	memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));

#ifdef DEBUG_PROM
	{
		int i;
		prom_printf("reserved memory map:\n");
		for (i = 0; i < mem_reserve_cnt; i++)
			prom_printf(" %x - %x\n",
				    be64_to_cpu(mem_reserve_map[i].base),
				    be64_to_cpu(mem_reserve_map[i].size));
	}
#endif
	/* Bump mem_reserve_cnt to cause further reservations to fail
	 * since it's too late.
	 */
	mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;

	prom_printf("Device tree strings 0x%x -> 0x%x\n",
		    dt_string_start, dt_string_end);
	prom_printf("Device tree struct 0x%x -> 0x%x\n",
		    dt_struct_start, dt_struct_end);
}
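/*
 * Rough map of the blob flatten_device_tree() builds (a summary of the
 * code above, not a format definition):
 *
 *	dt_header_start:  struct boot_param_header
 *	                  memory reserve map (copy of mem_reserve_map)
 *	dt_string_start:  property-name string table ("linux,phandle", ...)
 *	dt_struct_start:  OF_DT_BEGIN_NODE/OF_DT_PROP/... token stream
 *	dt_struct_end:    OF_DT_END, page aligned
 *
 * All offsets recorded in the header are relative to dt_header_start.
 */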
#ifdef CONFIG_PPC_MAPLE
/* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property.
 * The values are bad, and it doesn't even have the right number of cells. */
static void __init fixup_device_tree_maple(void)
{
	phandle isa;
	u32 rloc = 0x01002000; /* IO space; PCI device = 4 */
	u32 isa_ranges[6];
	char *name;

	name = "/ht@0/isa@4";
	isa = call_prom("finddevice", 1, 1, ADDR(name));
	if (!PHANDLE_VALID(isa)) {
		name = "/ht@0/isa@6";
		isa = call_prom("finddevice", 1, 1, ADDR(name));
		rloc = 0x01003000; /* IO space; PCI device = 6 */
	}
	if (!PHANDLE_VALID(isa))
		return;

	if (prom_getproplen(isa, "ranges") != 12)
		return;
	if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges))
	    == PROM_ERROR)
		return;

	if (isa_ranges[0] != 0x1 ||
	    isa_ranges[1] != 0xf4000000 ||
	    isa_ranges[2] != 0x00010000)
		return;

	prom_printf("Fixing up bogus ISA range on Maple/Apache...\n");

	isa_ranges[0] = 0x1;
	isa_ranges[1] = 0x0;
	isa_ranges[2] = rloc;
	isa_ranges[3] = 0x0;
	isa_ranges[4] = 0x0;
	isa_ranges[5] = 0x00010000;
	prom_setprop(isa, name, "ranges",
		     isa_ranges, sizeof(isa_ranges));
}

#define CPC925_MC_START		0xf8000000
#define CPC925_MC_LENGTH	0x1000000
/* The values for memory-controller don't have the right number of cells */
static void __init fixup_device_tree_maple_memory_controller(void)
{
	phandle mc;
	u32 mc_reg[4];
	char *name = "/hostbridge@f8000000";
	u32 ac, sc;

	mc = call_prom("finddevice", 1, 1, ADDR(name));
	if (!PHANDLE_VALID(mc))
		return;

	if (prom_getproplen(mc, "reg") != 8)
		return;

	prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac));
	prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc));
	if ((ac != 2) || (sc != 2))
		return;

	if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR)
		return;

	if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH)
		return;

	prom_printf("Fixing up bogus hostbridge on Maple...\n");

	mc_reg[0] = 0x0;
	mc_reg[1] = CPC925_MC_START;
	mc_reg[2] = 0x0;
	mc_reg[3] = CPC925_MC_LENGTH;
	prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg));
}
#else
#define fixup_device_tree_maple()
#define fixup_device_tree_maple_memory_controller()
#endif

#ifdef CONFIG_PPC_CHRP
/*
 * Pegasos and BriQ lack the "ranges" property in the isa node
 * Pegasos needs decimal IRQ 14/15, not hexadecimal
 * Pegasos has the IDE configured in legacy mode, but advertised as native
 */
static void __init fixup_device_tree_chrp(void)
{
	phandle ph;
	u32 prop[6];
	u32 rloc = 0x01006000; /* IO space; PCI device = 12 */
	char *name;
	int rc;

	name = "/pci@80000000/isa@c";
	ph = call_prom("finddevice", 1, 1, ADDR(name));
	if (!PHANDLE_VALID(ph)) {
		name = "/pci@ff500000/isa@6";
		ph = call_prom("finddevice", 1, 1, ADDR(name));
		rloc = 0x01003000; /* IO space; PCI device = 6 */
	}
	if (PHANDLE_VALID(ph)) {
		rc = prom_getproplen(ph, "ranges");
		if (rc == 0 || rc == PROM_ERROR) {
			prom_printf("Fixing up missing ISA range on Pegasos...\n");

			prop[0] = 0x1;
			prop[1] = 0x0;
			prop[2] = rloc;
			prop[3] = 0x0;
			prop[4] = 0x0;
			prop[5] = 0x00010000;
			prom_setprop(ph, name, "ranges", prop, sizeof(prop));
		}
	}

	name = "/pci@80000000/ide@C,1";
	ph = call_prom("finddevice", 1, 1, ADDR(name));
	if (PHANDLE_VALID(ph)) {
prom_printf("Fixing up IDE interrupt on Pegasos...\n"); 2664 prop[0] = 14; 2665 prop[1] = 0x0; 2666 prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32)); 2667 prom_printf("Fixing up IDE class-code on Pegasos...\n"); 2668 rc = prom_getprop(ph, "class-code", prop, sizeof(u32)); 2669 if (rc == sizeof(u32)) { 2670 prop[0] &= ~0x5; 2671 prom_setprop(ph, name, "class-code", prop, sizeof(u32)); 2672 } 2673 } 2674 } 2675 #else 2676 #define fixup_device_tree_chrp() 2677 #endif 2678 2679 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC) 2680 static void __init fixup_device_tree_pmac(void) 2681 { 2682 phandle u3, i2c, mpic; 2683 u32 u3_rev; 2684 u32 interrupts[2]; 2685 u32 parent; 2686 2687 /* Some G5s have a missing interrupt definition, fix it up here */ 2688 u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000")); 2689 if (!PHANDLE_VALID(u3)) 2690 return; 2691 i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000")); 2692 if (!PHANDLE_VALID(i2c)) 2693 return; 2694 mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000")); 2695 if (!PHANDLE_VALID(mpic)) 2696 return; 2697 2698 /* check if proper rev of u3 */ 2699 if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev)) 2700 == PROM_ERROR) 2701 return; 2702 if (u3_rev < 0x35 || u3_rev > 0x39) 2703 return; 2704 /* does it need fixup ? */ 2705 if (prom_getproplen(i2c, "interrupts") > 0) 2706 return; 2707 2708 prom_printf("fixing up bogus interrupts for u3 i2c...\n"); 2709 2710 /* interrupt on this revision of u3 is number 0 and level */ 2711 interrupts[0] = 0; 2712 interrupts[1] = 1; 2713 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts", 2714 &interrupts, sizeof(interrupts)); 2715 parent = (u32)mpic; 2716 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent", 2717 &parent, sizeof(parent)); 2718 } 2719 #else 2720 #define fixup_device_tree_pmac() 2721 #endif 2722 2723 #ifdef CONFIG_PPC_EFIKA 2724 /* 2725 * The MPC5200 FEC driver requires an phy-handle property to tell it how 2726 * to talk to the phy. If the phy-handle property is missing, then this 2727 * function is called to add the appropriate nodes and link it to the 2728 * ethernet node. 2729 */ 2730 static void __init fixup_device_tree_efika_add_phy(void) 2731 { 2732 u32 node; 2733 char prop[64]; 2734 int rv; 2735 2736 /* Check if /builtin/ethernet exists - bail if it doesn't */ 2737 node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet")); 2738 if (!PHANDLE_VALID(node)) 2739 return; 2740 2741 /* Check if the phy-handle property exists - bail if it does */ 2742 rv = prom_getprop(node, "phy-handle", prop, sizeof(prop)); 2743 if (!rv) 2744 return; 2745 2746 /* 2747 * At this point the ethernet device doesn't have a phy described. 
	 * Now we need to add the missing phy node and linkage.
	 */

	/* Check for an MDIO bus node - if missing then create one */
	node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
	if (!PHANDLE_VALID(node)) {
		prom_printf("Adding Ethernet MDIO node\n");
		call_prom("interpret", 1, 1,
			" s\" /builtin\" find-device"
			" new-device"
				" 1 encode-int s\" #address-cells\" property"
				" 0 encode-int s\" #size-cells\" property"
				" s\" mdio\" device-name"
				" s\" fsl,mpc5200b-mdio\" encode-string"
				" s\" compatible\" property"
				" 0xf0003000 0x400 reg"
				" 0x2 encode-int"
				" 0x5 encode-int encode+"
				" 0x3 encode-int encode+"
				" s\" interrupts\" property"
			" finish-device");
	}

	/* Check for a PHY device node - if missing then create one and
	 * give its phandle to the ethernet node */
	node = call_prom("finddevice", 1, 1,
			 ADDR("/builtin/mdio/ethernet-phy"));
	if (!PHANDLE_VALID(node)) {
		prom_printf("Adding Ethernet PHY node\n");
		call_prom("interpret", 1, 1,
			" s\" /builtin/mdio\" find-device"
			" new-device"
				" s\" ethernet-phy\" device-name"
				" 0x10 encode-int s\" reg\" property"
				" my-self"
				" ihandle>phandle"
			" finish-device"
			" s\" /builtin/ethernet\" find-device"
				" encode-int"
				" s\" phy-handle\" property"
			" device-end");
	}
}

static void __init fixup_device_tree_efika(void)
{
	int sound_irq[3] = { 2, 2, 0 };
	int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
				3,4,0, 3,5,0, 3,6,0, 3,7,0,
				3,8,0, 3,9,0, 3,10,0, 3,11,0,
				3,12,0, 3,13,0, 3,14,0, 3,15,0 };
	u32 node;
	char prop[64];
	int rv, len;

	/* Check if we're really running on an EFIKA */
	node = call_prom("finddevice", 1, 1, ADDR("/"));
	if (!PHANDLE_VALID(node))
		return;

	rv = prom_getprop(node, "model", prop, sizeof(prop));
	if (rv == PROM_ERROR)
		return;
	if (strcmp(prop, "EFIKA5K2"))
		return;

	prom_printf("Applying EFIKA device tree fixups\n");

	/* Claiming to be 'chrp' is death */
	node = call_prom("finddevice", 1, 1, ADDR("/"));
	rv = prom_getprop(node, "device_type", prop, sizeof(prop));
	if (rv != PROM_ERROR && (strcmp(prop, "chrp") == 0))
		prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));

	/* CODEGEN,description is exposed in /proc/cpuinfo so
	   fix that too */
	rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
	if (rv != PROM_ERROR && (strstr(prop, "CHRP")))
		prom_setprop(node, "/", "CODEGEN,description",
			     "Efika 5200B PowerPC System",
			     sizeof("Efika 5200B PowerPC System"));

	/* Fixup bestcomm interrupts property */
	node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
	if (PHANDLE_VALID(node)) {
		len = prom_getproplen(node, "interrupts");
		if (len == 12) {
			prom_printf("Fixing bestcomm interrupts property\n");
			prom_setprop(node, "/builtin/bestcom", "interrupts",
				     bcomm_irq, sizeof(bcomm_irq));
		}
	}

	/* Fixup sound interrupts property */
	node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
	if (PHANDLE_VALID(node)) {
		rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
		if (rv == PROM_ERROR) {
			prom_printf("Adding sound interrupts property\n");
			prom_setprop(node, "/builtin/sound", "interrupts",
				     sound_irq, sizeof(sound_irq));
		}
	}
	/* Make sure ethernet phy-handle property exists */
	fixup_device_tree_efika_add_phy();
}
#else
#define fixup_device_tree_efika()
#endif

#ifdef CONFIG_PPC_PASEMI_NEMO
/*
 * The CFE firmware supplied on Nemo is broken in several ways, the biggest
 * problem being that it reassigns ISA interrupts to unused mpic ints.
 * Add an interrupt-controller property for the io-bridge to use
 * and correct the ints so we can attach them to an irq_domain.
 */
static void __init fixup_device_tree_pasemi(void)
{
	u32 interrupts[2], parent, rval, val = 0;
	char *name, *pci_name;
	phandle iob, node;

	/* Find the root pci node */
	name = "/pxp@0,e0000000";
	iob = call_prom("finddevice", 1, 1, ADDR(name));
	if (!PHANDLE_VALID(iob))
		return;

	/* check if interrupt-controller node set yet */
	if (prom_getproplen(iob, "interrupt-controller") != PROM_ERROR)
		return;

	prom_printf("adding interrupt-controller property for SB600...\n");

	prom_setprop(iob, name, "interrupt-controller", &val, 0);

	pci_name = "/pxp@0,e0000000/pci@11";
	node = call_prom("finddevice", 1, 1, ADDR(pci_name));
	parent = ADDR(iob);

	for ( ; prom_next_node(&node); ) {
		/* scan each node for one with an interrupt */
		if (!PHANDLE_VALID(node))
			continue;

		rval = prom_getproplen(node, "interrupts");
		if (rval == 0 || rval == PROM_ERROR)
			continue;

		prom_getprop(node, "interrupts", &interrupts, sizeof(interrupts));
		if ((interrupts[0] < 212) || (interrupts[0] > 222))
			continue;

		/* found a node, update both interrupts and interrupt-parent */
		if ((interrupts[0] >= 212) && (interrupts[0] <= 215))
			interrupts[0] -= 203;
		if ((interrupts[0] >= 216) && (interrupts[0] <= 220))
			interrupts[0] -= 213;
		if (interrupts[0] == 221)
			interrupts[0] = 14;
		if (interrupts[0] == 222)
			interrupts[0] = 8;

		prom_setprop(node, pci_name, "interrupts", interrupts,
			     sizeof(interrupts));
		prom_setprop(node, pci_name, "interrupt-parent", &parent,
			     sizeof(parent));
	}

	/*
	 * The io-bridge has device_type set to 'io-bridge'; change it to 'isa'
	 * so that generic isa-bridge code can add the SB600 and its on-board
	 * peripherals.
	 */
	name = "/pxp@0,e0000000/io-bridge@0";
	iob = call_prom("finddevice", 1, 1, ADDR(name));
	if (!PHANDLE_VALID(iob))
		return;

	/* device_type is already set, just change it. */

	prom_printf("Changing device_type of SB600 node...\n");

	prom_setprop(iob, name, "device_type", "isa", sizeof("isa"));
}
#else	/* !CONFIG_PPC_PASEMI_NEMO */
static inline void fixup_device_tree_pasemi(void) { }
#endif

static void __init fixup_device_tree(void)
{
	fixup_device_tree_maple();
	fixup_device_tree_maple_memory_controller();
	fixup_device_tree_chrp();
	fixup_device_tree_pmac();
	fixup_device_tree_efika();
	fixup_device_tree_pasemi();
}

static void __init prom_find_boot_cpu(void)
{
	__be32 rval;
	ihandle prom_cpu;
	phandle cpu_pkg;

	rval = 0;
	if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0)
		return;
	prom_cpu = be32_to_cpu(rval);

	cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);

	if (!PHANDLE_VALID(cpu_pkg))
		return;

	prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
	prom.cpu = be32_to_cpu(rval);

	prom_debug("Booting CPU hw index = %lu\n", prom.cpu);
}

static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (r3 && r4 && r4 != 0xdeadbeef) {
		__be64 val;

		prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
		prom_initrd_end = prom_initrd_start + r4;

		val = cpu_to_be64(prom_initrd_start);
		prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
			     &val, sizeof(val));
		val = cpu_to_be64(prom_initrd_end);
		prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
			     &val, sizeof(val));

		reserve_mem(prom_initrd_start,
			    prom_initrd_end - prom_initrd_start);

		prom_debug("initrd_start=0x%x\n", prom_initrd_start);
		prom_debug("initrd_end=0x%x\n", prom_initrd_end);
	}
#endif /* CONFIG_BLK_DEV_INITRD */
}

#ifdef CONFIG_PPC64
#ifdef CONFIG_RELOCATABLE
static void reloc_toc(void)
{
}

static void unreloc_toc(void)
{
}
#else
static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
{
	unsigned long i;
	unsigned long *toc_entry;

	/* Get the start of the TOC by using r2 directly. */
	asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));

	for (i = 0; i < nr_entries; i++) {
		*toc_entry = *toc_entry + offset;
		toc_entry++;
	}
}

static void reloc_toc(void)
{
	unsigned long offset = reloc_offset();
	unsigned long nr_entries =
		(__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);

	__reloc_toc(offset, nr_entries);

	mb();
}

static void unreloc_toc(void)
{
	unsigned long offset = reloc_offset();
	unsigned long nr_entries =
		(__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);

	mb();

	__reloc_toc(-offset, nr_entries);
}
#endif
#endif
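/*
 * Note (a summary; the linker script is authoritative for the layout):
 * only the TOC entries between __prom_init_toc_start and
 * __prom_init_toc_end are patched above, i.e. the slice of the TOC that
 * prom_init's own code references.  Conceptually each 8-byte entry is
 * rewritten as:
 *
 *	entry += reloc_offset();	// reloc_toc()
 *	entry -= reloc_offset();	// unreloc_toc()
 */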
/*
 * We enter here early on, when the Open Firmware prom is still
 * handling exceptions and managing the MMU hash table for us.
 */

unsigned long __init prom_init(unsigned long r3, unsigned long r4,
			       unsigned long pp,
			       unsigned long r6, unsigned long r7,
			       unsigned long kbase)
{
	unsigned long hdr;

#ifdef CONFIG_PPC32
	unsigned long offset = reloc_offset();
	reloc_got2(offset);
#else
	reloc_toc();
#endif

	/*
	 * First zero the BSS
	 */
	memset(&__bss_start, 0, __bss_stop - __bss_start);

	/*
	 * Init interface to Open Firmware, get some node references,
	 * like /chosen
	 */
	prom_init_client_services(pp);

	/*
	 * See if this OF is old enough that we need to do explicit maps
	 * and other workarounds
	 */
	prom_find_mmu();

	/*
	 * Init prom stdout device
	 */
	prom_init_stdout();

	prom_printf("Preparing to boot %s", linux_banner);

	/*
	 * Get default machine type. At this point, we do not differentiate
	 * between pSeries SMP and pSeries LPAR
	 */
	of_platform = prom_find_machine_type();
	prom_printf("Detected machine type: %x\n", of_platform);

#ifndef CONFIG_NONSTATIC_KERNEL
	/* Bail if this is a kdump kernel. */
	if (PHYSICAL_START > 0)
		prom_panic("Error: You can't boot a kdump kernel from OF!\n");
#endif

	/*
	 * Check for an initrd
	 */
	prom_check_initrd(r3, r4);

	/*
	 * Do early parsing of command line
	 */
	early_cmdline_parse();

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
	/*
	 * On pSeries, inform the firmware about our capabilities
	 */
	if (of_platform == PLATFORM_PSERIES ||
	    of_platform == PLATFORM_PSERIES_LPAR)
		prom_send_capabilities();
#endif

	/*
	 * Copy the CPU hold code
	 */
	if (of_platform != PLATFORM_POWERMAC)
		copy_and_flush(0, kbase, 0x100, 0);

	/*
	 * Initialize memory management within prom_init
	 */
	prom_init_mem();

	/*
	 * Determine which cpu is actually running right _now_
	 */
	prom_find_boot_cpu();

	/*
	 * Initialize display devices
	 */
	prom_check_displays();

#if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__)
	/*
	 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything
	 * else that uses the allocator; we need to make sure we get the top
	 * of memory available for us here.
	 */
	if (of_platform == PLATFORM_PSERIES)
		prom_initialize_tce_table();
#endif

	/*
	 * On non-powermacs, try to instantiate RTAS. PowerMacs don't
	 * have a usable RTAS implementation.
	 */
	if (of_platform != PLATFORM_POWERMAC &&
	    of_platform != PLATFORM_OPAL)
		prom_instantiate_rtas();

#ifdef CONFIG_PPC_POWERNV
	if (of_platform == PLATFORM_OPAL)
		prom_instantiate_opal();
#endif /* CONFIG_PPC_POWERNV */

#ifdef CONFIG_PPC64
	/* instantiate sml */
	prom_instantiate_sml();
#endif
	/*
	 * On non-powermacs, put all CPUs in spin-loops.
	 *
	 * PowerMacs use a different mechanism to spin CPUs.
	 *
	 * (This must be done after instantiating RTAS)
	 */
	if (of_platform != PLATFORM_POWERMAC &&
	    of_platform != PLATFORM_OPAL)
		prom_hold_cpus();

	/*
	 * Fill in some information for use by the kernel later on
	 */
	if (prom_memory_limit) {
		__be64 val = cpu_to_be64(prom_memory_limit);
		prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
			     &val, sizeof(val));
	}
#ifdef CONFIG_PPC64
	if (prom_iommu_off)
		prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
			     NULL, 0);

	if (prom_iommu_force_on)
		prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
			     NULL, 0);

	if (prom_tce_alloc_start) {
		prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
			     &prom_tce_alloc_start,
			     sizeof(prom_tce_alloc_start));
		prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
			     &prom_tce_alloc_end,
			     sizeof(prom_tce_alloc_end));
	}
#endif

	/*
	 * Fixup any known bugs in the device-tree
	 */
	fixup_device_tree();

	/*
	 * Now finally create the flattened device-tree
	 */
	prom_printf("copying OF device tree...\n");
	flatten_device_tree();

	/*
	 * in case stdin is USB and still active on IBM machines...
	 * Unfortunately quiesce crashes on some powermacs if we have
	 * closed stdin already (in particular the powerbook 101). It
	 * appears that the OPAL version of OFW doesn't like it either.
	 */
	if (of_platform != PLATFORM_POWERMAC &&
	    of_platform != PLATFORM_OPAL)
		prom_close_stdin();

	/*
	 * Call OF "quiesce" method to shut down pending DMAs from
	 * devices, etc.
	 */
	prom_printf("Quiescing Open Firmware ...\n");
	call_prom("quiesce", 0, 0);

	/*
	 * And finally, call the kernel passing it the flattened device
	 * tree and NULL as r5, thus triggering the new entry point which
	 * is common to us and kexec.
	 */
	hdr = dt_header_start;

	/* Don't print anything after quiesce under OPAL, it crashes OFW */
	if (of_platform != PLATFORM_OPAL) {
		prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
		prom_debug("->dt_header_start=0x%x\n", hdr);
	}

#ifdef CONFIG_PPC32
	reloc_got2(-offset);
#else
	unreloc_toc();
#endif

#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
	/* OPAL early debug gets the OPAL base & entry in r8 and r9 */
	__start(hdr, kbase, 0, 0, 0,
		prom_opal_base, prom_opal_entry);
#else
	__start(hdr, kbase, 0, 0, 0, 0, 0);
#endif

	return 0;
}