1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Procedures for interfacing to Open Firmware. 4 * 5 * Paul Mackerras August 1996. 6 * Copyright (C) 1996-2005 Paul Mackerras. 7 * 8 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner. 9 * {engebret|bergner}@us.ibm.com 10 */ 11 12 #undef DEBUG_PROM 13 14 /* we cannot use FORTIFY as it brings in new symbols */ 15 #define __NO_FORTIFY 16 17 #include <stdarg.h> 18 #include <linux/kernel.h> 19 #include <linux/string.h> 20 #include <linux/init.h> 21 #include <linux/threads.h> 22 #include <linux/spinlock.h> 23 #include <linux/types.h> 24 #include <linux/pci.h> 25 #include <linux/proc_fs.h> 26 #include <linux/delay.h> 27 #include <linux/initrd.h> 28 #include <linux/bitops.h> 29 #include <asm/prom.h> 30 #include <asm/rtas.h> 31 #include <asm/page.h> 32 #include <asm/processor.h> 33 #include <asm/irq.h> 34 #include <asm/io.h> 35 #include <asm/smp.h> 36 #include <asm/mmu.h> 37 #include <asm/pgtable.h> 38 #include <asm/iommu.h> 39 #include <asm/btext.h> 40 #include <asm/sections.h> 41 #include <asm/machdep.h> 42 #include <asm/asm-prototypes.h> 43 44 #include <linux/linux_logo.h> 45 46 /* All of prom_init bss lives here */ 47 #define __prombss __section(.bss.prominit) 48 49 /* 50 * Eventually bump that one up 51 */ 52 #define DEVTREE_CHUNK_SIZE 0x100000 53 54 /* 55 * This is the size of the local memory reserve map that gets copied 56 * into the boot params passed to the kernel. That size is totally 57 * flexible as the kernel just reads the list until it encounters an 58 * entry with size 0, so it can be changed without breaking binary 59 * compatibility 60 */ 61 #define MEM_RESERVE_MAP_SIZE 8 62 63 /* 64 * prom_init() is called very early on, before the kernel text 65 * and data have been mapped to KERNELBASE. At this point the code 66 * is running at whatever address it has been loaded at. 67 * On ppc32 we compile with -mrelocatable, which means that references 68 * to extern and static variables get relocated automatically. 69 * ppc64 objects are always relocatable, we just need to relocate the 70 * TOC. 71 * 72 * Because OF may have mapped I/O devices into the area starting at 73 * KERNELBASE, particularly on CHRP machines, we can't safely call 74 * OF once the kernel has been mapped to KERNELBASE. Therefore all 75 * OF calls must be done within prom_init(). 76 * 77 * ADDR is used in calls to call_prom. The 4th and following 78 * arguments to call_prom should be 32-bit values. 79 * On ppc64, 64 bit values are truncated to 32 bits (and 80 * fortunately don't get interpreted as two arguments). 81 */ 82 #define ADDR(x) (u32)(unsigned long)(x) 83 84 #ifdef CONFIG_PPC64 85 #define OF_WORKAROUNDS 0 86 #else 87 #define OF_WORKAROUNDS of_workarounds 88 static int of_workarounds __prombss; 89 #endif 90 91 #define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */ 92 #define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */ 93 94 #define PROM_BUG() do { \ 95 prom_printf("kernel BUG at %s line 0x%x!\n", \ 96 __FILE__, __LINE__); \ 97 __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \ 98 } while (0) 99 100 #ifdef DEBUG_PROM 101 #define prom_debug(x...) prom_printf(x) 102 #else 103 #define prom_debug(x...) do { } while (0) 104 #endif 105 106 107 typedef u32 prom_arg_t; 108 109 struct prom_args { 110 __be32 service; 111 __be32 nargs; 112 __be32 nret; 113 __be32 args[10]; 114 }; 115 116 struct prom_t { 117 ihandle root; 118 phandle chosen; 119 int cpu; 120 ihandle stdout; 121 ihandle mmumap; 122 ihandle memory; 123 }; 124 125 struct mem_map_entry { 126 __be64 base; 127 __be64 size; 128 }; 129 130 typedef __be32 cell_t; 131 132 extern void __start(unsigned long r3, unsigned long r4, unsigned long r5, 133 unsigned long r6, unsigned long r7, unsigned long r8, 134 unsigned long r9); 135 136 #ifdef CONFIG_PPC64 137 extern int enter_prom(struct prom_args *args, unsigned long entry); 138 #else 139 static inline int enter_prom(struct prom_args *args, unsigned long entry) 140 { 141 return ((int (*)(struct prom_args *))entry)(args); 142 } 143 #endif 144 145 extern void copy_and_flush(unsigned long dest, unsigned long src, 146 unsigned long size, unsigned long offset); 147 148 /* prom structure */ 149 static struct prom_t __prombss prom; 150 151 static unsigned long __prombss prom_entry; 152 153 static char __prombss of_stdout_device[256]; 154 static char __prombss prom_scratch[256]; 155 156 static unsigned long __prombss dt_header_start; 157 static unsigned long __prombss dt_struct_start, dt_struct_end; 158 static unsigned long __prombss dt_string_start, dt_string_end; 159 160 static unsigned long __prombss prom_initrd_start, prom_initrd_end; 161 162 #ifdef CONFIG_PPC64 163 static int __prombss prom_iommu_force_on; 164 static int __prombss prom_iommu_off; 165 static unsigned long __prombss prom_tce_alloc_start; 166 static unsigned long __prombss prom_tce_alloc_end; 167 #endif 168 169 #ifdef CONFIG_PPC_PSERIES 170 static bool __prombss prom_radix_disable; 171 #endif 172 173 struct platform_support { 174 bool hash_mmu; 175 bool radix_mmu; 176 bool radix_gtse; 177 bool xive; 178 }; 179 180 /* Platforms codes are now obsolete in the kernel. Now only used within this 181 * file and ultimately gone too. Feel free to change them if you need, they 182 * are not shared with anything outside of this file anymore 183 */ 184 #define PLATFORM_PSERIES 0x0100 185 #define PLATFORM_PSERIES_LPAR 0x0101 186 #define PLATFORM_LPAR 0x0001 187 #define PLATFORM_POWERMAC 0x0400 188 #define PLATFORM_GENERIC 0x0500 189 190 static int __prombss of_platform; 191 192 static char __prombss prom_cmd_line[COMMAND_LINE_SIZE]; 193 194 static unsigned long __prombss prom_memory_limit; 195 196 static unsigned long __prombss alloc_top; 197 static unsigned long __prombss alloc_top_high; 198 static unsigned long __prombss alloc_bottom; 199 static unsigned long __prombss rmo_top; 200 static unsigned long __prombss ram_top; 201 202 static struct mem_map_entry __prombss mem_reserve_map[MEM_RESERVE_MAP_SIZE]; 203 static int __prombss mem_reserve_cnt; 204 205 static cell_t __prombss regbuf[1024]; 206 207 static bool __prombss rtas_has_query_cpu_stopped; 208 209 210 /* 211 * Error results ... some OF calls will return "-1" on error, some 212 * will return 0, some will return either. To simplify, here are 213 * macros to use with any ihandle or phandle return value to check if 214 * it is valid 215 */ 216 217 #define PROM_ERROR (-1u) 218 #define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR) 219 #define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR) 220 221 /* Copied from lib/string.c and lib/kstrtox.c */ 222 223 static int __init prom_strcmp(const char *cs, const char *ct) 224 { 225 unsigned char c1, c2; 226 227 while (1) { 228 c1 = *cs++; 229 c2 = *ct++; 230 if (c1 != c2) 231 return c1 < c2 ? -1 : 1; 232 if (!c1) 233 break; 234 } 235 return 0; 236 } 237 238 static char __init *prom_strcpy(char *dest, const char *src) 239 { 240 char *tmp = dest; 241 242 while ((*dest++ = *src++) != '\0') 243 /* nothing */; 244 return tmp; 245 } 246 247 static int __init prom_strncmp(const char *cs, const char *ct, size_t count) 248 { 249 unsigned char c1, c2; 250 251 while (count) { 252 c1 = *cs++; 253 c2 = *ct++; 254 if (c1 != c2) 255 return c1 < c2 ? -1 : 1; 256 if (!c1) 257 break; 258 count--; 259 } 260 return 0; 261 } 262 263 static size_t __init prom_strlen(const char *s) 264 { 265 const char *sc; 266 267 for (sc = s; *sc != '\0'; ++sc) 268 /* nothing */; 269 return sc - s; 270 } 271 272 static int __init prom_memcmp(const void *cs, const void *ct, size_t count) 273 { 274 const unsigned char *su1, *su2; 275 int res = 0; 276 277 for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--) 278 if ((res = *su1 - *su2) != 0) 279 break; 280 return res; 281 } 282 283 static char __init *prom_strstr(const char *s1, const char *s2) 284 { 285 size_t l1, l2; 286 287 l2 = prom_strlen(s2); 288 if (!l2) 289 return (char *)s1; 290 l1 = prom_strlen(s1); 291 while (l1 >= l2) { 292 l1--; 293 if (!prom_memcmp(s1, s2, l2)) 294 return (char *)s1; 295 s1++; 296 } 297 return NULL; 298 } 299 300 static size_t __init prom_strlcpy(char *dest, const char *src, size_t size) 301 { 302 size_t ret = prom_strlen(src); 303 304 if (size) { 305 size_t len = (ret >= size) ? size - 1 : ret; 306 memcpy(dest, src, len); 307 dest[len] = '\0'; 308 } 309 return ret; 310 } 311 312 #ifdef CONFIG_PPC_PSERIES 313 static int __init prom_strtobool(const char *s, bool *res) 314 { 315 if (!s) 316 return -EINVAL; 317 318 switch (s[0]) { 319 case 'y': 320 case 'Y': 321 case '1': 322 *res = true; 323 return 0; 324 case 'n': 325 case 'N': 326 case '0': 327 *res = false; 328 return 0; 329 case 'o': 330 case 'O': 331 switch (s[1]) { 332 case 'n': 333 case 'N': 334 *res = true; 335 return 0; 336 case 'f': 337 case 'F': 338 *res = false; 339 return 0; 340 default: 341 break; 342 } 343 default: 344 break; 345 } 346 347 return -EINVAL; 348 } 349 #endif 350 351 /* This is the one and *ONLY* place where we actually call open 352 * firmware. 353 */ 354 355 static int __init call_prom(const char *service, int nargs, int nret, ...) 356 { 357 int i; 358 struct prom_args args; 359 va_list list; 360 361 args.service = cpu_to_be32(ADDR(service)); 362 args.nargs = cpu_to_be32(nargs); 363 args.nret = cpu_to_be32(nret); 364 365 va_start(list, nret); 366 for (i = 0; i < nargs; i++) 367 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t)); 368 va_end(list); 369 370 for (i = 0; i < nret; i++) 371 args.args[nargs+i] = 0; 372 373 if (enter_prom(&args, prom_entry) < 0) 374 return PROM_ERROR; 375 376 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0; 377 } 378 379 static int __init call_prom_ret(const char *service, int nargs, int nret, 380 prom_arg_t *rets, ...) 381 { 382 int i; 383 struct prom_args args; 384 va_list list; 385 386 args.service = cpu_to_be32(ADDR(service)); 387 args.nargs = cpu_to_be32(nargs); 388 args.nret = cpu_to_be32(nret); 389 390 va_start(list, rets); 391 for (i = 0; i < nargs; i++) 392 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t)); 393 va_end(list); 394 395 for (i = 0; i < nret; i++) 396 args.args[nargs+i] = 0; 397 398 if (enter_prom(&args, prom_entry) < 0) 399 return PROM_ERROR; 400 401 if (rets != NULL) 402 for (i = 1; i < nret; ++i) 403 rets[i-1] = be32_to_cpu(args.args[nargs+i]); 404 405 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0; 406 } 407 408 409 static void __init prom_print(const char *msg) 410 { 411 const char *p, *q; 412 413 if (prom.stdout == 0) 414 return; 415 416 for (p = msg; *p != 0; p = q) { 417 for (q = p; *q != 0 && *q != '\n'; ++q) 418 ; 419 if (q > p) 420 call_prom("write", 3, 1, prom.stdout, p, q - p); 421 if (*q == 0) 422 break; 423 ++q; 424 call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2); 425 } 426 } 427 428 429 /* 430 * Both prom_print_hex & prom_print_dec takes an unsigned long as input so that 431 * we do not need __udivdi3 or __umoddi3 on 32bits. 432 */ 433 static void __init prom_print_hex(unsigned long val) 434 { 435 int i, nibbles = sizeof(val)*2; 436 char buf[sizeof(val)*2+1]; 437 438 for (i = nibbles-1; i >= 0; i--) { 439 buf[i] = (val & 0xf) + '0'; 440 if (buf[i] > '9') 441 buf[i] += ('a'-'0'-10); 442 val >>= 4; 443 } 444 buf[nibbles] = '\0'; 445 call_prom("write", 3, 1, prom.stdout, buf, nibbles); 446 } 447 448 /* max number of decimal digits in an unsigned long */ 449 #define UL_DIGITS 21 450 static void __init prom_print_dec(unsigned long val) 451 { 452 int i, size; 453 char buf[UL_DIGITS+1]; 454 455 for (i = UL_DIGITS-1; i >= 0; i--) { 456 buf[i] = (val % 10) + '0'; 457 val = val/10; 458 if (val == 0) 459 break; 460 } 461 /* shift stuff down */ 462 size = UL_DIGITS - i; 463 call_prom("write", 3, 1, prom.stdout, buf+i, size); 464 } 465 466 __printf(1, 2) 467 static void __init prom_printf(const char *format, ...) 468 { 469 const char *p, *q, *s; 470 va_list args; 471 unsigned long v; 472 long vs; 473 int n = 0; 474 475 va_start(args, format); 476 for (p = format; *p != 0; p = q) { 477 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q) 478 ; 479 if (q > p) 480 call_prom("write", 3, 1, prom.stdout, p, q - p); 481 if (*q == 0) 482 break; 483 if (*q == '\n') { 484 ++q; 485 call_prom("write", 3, 1, prom.stdout, 486 ADDR("\r\n"), 2); 487 continue; 488 } 489 ++q; 490 if (*q == 0) 491 break; 492 while (*q == 'l') { 493 ++q; 494 ++n; 495 } 496 switch (*q) { 497 case 's': 498 ++q; 499 s = va_arg(args, const char *); 500 prom_print(s); 501 break; 502 case 'x': 503 ++q; 504 switch (n) { 505 case 0: 506 v = va_arg(args, unsigned int); 507 break; 508 case 1: 509 v = va_arg(args, unsigned long); 510 break; 511 case 2: 512 default: 513 v = va_arg(args, unsigned long long); 514 break; 515 } 516 prom_print_hex(v); 517 break; 518 case 'u': 519 ++q; 520 switch (n) { 521 case 0: 522 v = va_arg(args, unsigned int); 523 break; 524 case 1: 525 v = va_arg(args, unsigned long); 526 break; 527 case 2: 528 default: 529 v = va_arg(args, unsigned long long); 530 break; 531 } 532 prom_print_dec(v); 533 break; 534 case 'd': 535 ++q; 536 switch (n) { 537 case 0: 538 vs = va_arg(args, int); 539 break; 540 case 1: 541 vs = va_arg(args, long); 542 break; 543 case 2: 544 default: 545 vs = va_arg(args, long long); 546 break; 547 } 548 if (vs < 0) { 549 prom_print("-"); 550 vs = -vs; 551 } 552 prom_print_dec(vs); 553 break; 554 } 555 } 556 va_end(args); 557 } 558 559 560 static unsigned int __init prom_claim(unsigned long virt, unsigned long size, 561 unsigned long align) 562 { 563 564 if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) { 565 /* 566 * Old OF requires we claim physical and virtual separately 567 * and then map explicitly (assuming virtual mode) 568 */ 569 int ret; 570 prom_arg_t result; 571 572 ret = call_prom_ret("call-method", 5, 2, &result, 573 ADDR("claim"), prom.memory, 574 align, size, virt); 575 if (ret != 0 || result == -1) 576 return -1; 577 ret = call_prom_ret("call-method", 5, 2, &result, 578 ADDR("claim"), prom.mmumap, 579 align, size, virt); 580 if (ret != 0) { 581 call_prom("call-method", 4, 1, ADDR("release"), 582 prom.memory, size, virt); 583 return -1; 584 } 585 /* the 0x12 is M (coherence) + PP == read/write */ 586 call_prom("call-method", 6, 1, 587 ADDR("map"), prom.mmumap, 0x12, size, virt, virt); 588 return virt; 589 } 590 return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size, 591 (prom_arg_t)align); 592 } 593 594 static void __init __attribute__((noreturn)) prom_panic(const char *reason) 595 { 596 prom_print(reason); 597 /* Do not call exit because it clears the screen on pmac 598 * it also causes some sort of double-fault on early pmacs */ 599 if (of_platform == PLATFORM_POWERMAC) 600 asm("trap\n"); 601 602 /* ToDo: should put up an SRC here on pSeries */ 603 call_prom("exit", 0, 0); 604 605 for (;;) /* should never get here */ 606 ; 607 } 608 609 610 static int __init prom_next_node(phandle *nodep) 611 { 612 phandle node; 613 614 if ((node = *nodep) != 0 615 && (*nodep = call_prom("child", 1, 1, node)) != 0) 616 return 1; 617 if ((*nodep = call_prom("peer", 1, 1, node)) != 0) 618 return 1; 619 for (;;) { 620 if ((node = call_prom("parent", 1, 1, node)) == 0) 621 return 0; 622 if ((*nodep = call_prom("peer", 1, 1, node)) != 0) 623 return 1; 624 } 625 } 626 627 static inline int __init prom_getprop(phandle node, const char *pname, 628 void *value, size_t valuelen) 629 { 630 return call_prom("getprop", 4, 1, node, ADDR(pname), 631 (u32)(unsigned long) value, (u32) valuelen); 632 } 633 634 static inline int __init prom_getproplen(phandle node, const char *pname) 635 { 636 return call_prom("getproplen", 2, 1, node, ADDR(pname)); 637 } 638 639 static void add_string(char **str, const char *q) 640 { 641 char *p = *str; 642 643 while (*q) 644 *p++ = *q++; 645 *p++ = ' '; 646 *str = p; 647 } 648 649 static char *tohex(unsigned int x) 650 { 651 static const char digits[] __initconst = "0123456789abcdef"; 652 static char result[9] __prombss; 653 int i; 654 655 result[8] = 0; 656 i = 8; 657 do { 658 --i; 659 result[i] = digits[x & 0xf]; 660 x >>= 4; 661 } while (x != 0 && i > 0); 662 return &result[i]; 663 } 664 665 static int __init prom_setprop(phandle node, const char *nodename, 666 const char *pname, void *value, size_t valuelen) 667 { 668 char cmd[256], *p; 669 670 if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL)) 671 return call_prom("setprop", 4, 1, node, ADDR(pname), 672 (u32)(unsigned long) value, (u32) valuelen); 673 674 /* gah... setprop doesn't work on longtrail, have to use interpret */ 675 p = cmd; 676 add_string(&p, "dev"); 677 add_string(&p, nodename); 678 add_string(&p, tohex((u32)(unsigned long) value)); 679 add_string(&p, tohex(valuelen)); 680 add_string(&p, tohex(ADDR(pname))); 681 add_string(&p, tohex(prom_strlen(pname))); 682 add_string(&p, "property"); 683 *p = 0; 684 return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd); 685 } 686 687 /* We can't use the standard versions because of relocation headaches. */ 688 #define isxdigit(c) (('0' <= (c) && (c) <= '9') \ 689 || ('a' <= (c) && (c) <= 'f') \ 690 || ('A' <= (c) && (c) <= 'F')) 691 692 #define isdigit(c) ('0' <= (c) && (c) <= '9') 693 #define islower(c) ('a' <= (c) && (c) <= 'z') 694 #define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c)) 695 696 static unsigned long prom_strtoul(const char *cp, const char **endp) 697 { 698 unsigned long result = 0, base = 10, value; 699 700 if (*cp == '0') { 701 base = 8; 702 cp++; 703 if (toupper(*cp) == 'X') { 704 cp++; 705 base = 16; 706 } 707 } 708 709 while (isxdigit(*cp) && 710 (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) { 711 result = result * base + value; 712 cp++; 713 } 714 715 if (endp) 716 *endp = cp; 717 718 return result; 719 } 720 721 static unsigned long prom_memparse(const char *ptr, const char **retptr) 722 { 723 unsigned long ret = prom_strtoul(ptr, retptr); 724 int shift = 0; 725 726 /* 727 * We can't use a switch here because GCC *may* generate a 728 * jump table which won't work, because we're not running at 729 * the address we're linked at. 730 */ 731 if ('G' == **retptr || 'g' == **retptr) 732 shift = 30; 733 734 if ('M' == **retptr || 'm' == **retptr) 735 shift = 20; 736 737 if ('K' == **retptr || 'k' == **retptr) 738 shift = 10; 739 740 if (shift) { 741 ret <<= shift; 742 (*retptr)++; 743 } 744 745 return ret; 746 } 747 748 /* 749 * Early parsing of the command line passed to the kernel, used for 750 * "mem=x" and the options that affect the iommu 751 */ 752 static void __init early_cmdline_parse(void) 753 { 754 const char *opt; 755 756 char *p; 757 int l = 0; 758 759 prom_cmd_line[0] = 0; 760 p = prom_cmd_line; 761 if ((long)prom.chosen > 0) 762 l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1); 763 if (IS_ENABLED(CONFIG_CMDLINE_BOOL) && (l <= 0 || p[0] == '\0')) /* dbl check */ 764 prom_strlcpy(prom_cmd_line, CONFIG_CMDLINE, sizeof(prom_cmd_line)); 765 prom_printf("command line: %s\n", prom_cmd_line); 766 767 #ifdef CONFIG_PPC64 768 opt = prom_strstr(prom_cmd_line, "iommu="); 769 if (opt) { 770 prom_printf("iommu opt is: %s\n", opt); 771 opt += 6; 772 while (*opt && *opt == ' ') 773 opt++; 774 if (!prom_strncmp(opt, "off", 3)) 775 prom_iommu_off = 1; 776 else if (!prom_strncmp(opt, "force", 5)) 777 prom_iommu_force_on = 1; 778 } 779 #endif 780 opt = prom_strstr(prom_cmd_line, "mem="); 781 if (opt) { 782 opt += 4; 783 prom_memory_limit = prom_memparse(opt, (const char **)&opt); 784 #ifdef CONFIG_PPC64 785 /* Align to 16 MB == size of ppc64 large page */ 786 prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000); 787 #endif 788 } 789 790 #ifdef CONFIG_PPC_PSERIES 791 prom_radix_disable = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT); 792 opt = prom_strstr(prom_cmd_line, "disable_radix"); 793 if (opt) { 794 opt += 13; 795 if (*opt && *opt == '=') { 796 bool val; 797 798 if (prom_strtobool(++opt, &val)) 799 prom_radix_disable = false; 800 else 801 prom_radix_disable = val; 802 } else 803 prom_radix_disable = true; 804 } 805 if (prom_radix_disable) 806 prom_debug("Radix disabled from cmdline\n"); 807 #endif /* CONFIG_PPC_PSERIES */ 808 } 809 810 #ifdef CONFIG_PPC_PSERIES 811 /* 812 * The architecture vector has an array of PVR mask/value pairs, 813 * followed by # option vectors - 1, followed by the option vectors. 814 * 815 * See prom.h for the definition of the bits specified in the 816 * architecture vector. 817 */ 818 819 /* Firmware expects the value to be n - 1, where n is the # of vectors */ 820 #define NUM_VECTORS(n) ((n) - 1) 821 822 /* 823 * Firmware expects 1 + n - 2, where n is the length of the option vector in 824 * bytes. The 1 accounts for the length byte itself, the - 2 .. ? 825 */ 826 #define VECTOR_LENGTH(n) (1 + (n) - 2) 827 828 struct option_vector1 { 829 u8 byte1; 830 u8 arch_versions; 831 u8 arch_versions3; 832 } __packed; 833 834 struct option_vector2 { 835 u8 byte1; 836 __be16 reserved; 837 __be32 real_base; 838 __be32 real_size; 839 __be32 virt_base; 840 __be32 virt_size; 841 __be32 load_base; 842 __be32 min_rma; 843 __be32 min_load; 844 u8 min_rma_percent; 845 u8 max_pft_size; 846 } __packed; 847 848 struct option_vector3 { 849 u8 byte1; 850 u8 byte2; 851 } __packed; 852 853 struct option_vector4 { 854 u8 byte1; 855 u8 min_vp_cap; 856 } __packed; 857 858 struct option_vector5 { 859 u8 byte1; 860 u8 byte2; 861 u8 byte3; 862 u8 cmo; 863 u8 associativity; 864 u8 bin_opts; 865 u8 micro_checkpoint; 866 u8 reserved0; 867 __be32 max_cpus; 868 __be16 papr_level; 869 __be16 reserved1; 870 u8 platform_facilities; 871 u8 reserved2; 872 __be16 reserved3; 873 u8 subprocessors; 874 u8 byte22; 875 u8 intarch; 876 u8 mmu; 877 u8 hash_ext; 878 u8 radix_ext; 879 } __packed; 880 881 struct option_vector6 { 882 u8 reserved; 883 u8 secondary_pteg; 884 u8 os_name; 885 } __packed; 886 887 struct ibm_arch_vec { 888 struct { u32 mask, val; } pvrs[12]; 889 890 u8 num_vectors; 891 892 u8 vec1_len; 893 struct option_vector1 vec1; 894 895 u8 vec2_len; 896 struct option_vector2 vec2; 897 898 u8 vec3_len; 899 struct option_vector3 vec3; 900 901 u8 vec4_len; 902 struct option_vector4 vec4; 903 904 u8 vec5_len; 905 struct option_vector5 vec5; 906 907 u8 vec6_len; 908 struct option_vector6 vec6; 909 } __packed; 910 911 static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = { 912 .pvrs = { 913 { 914 .mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */ 915 .val = cpu_to_be32(0x003a0000), 916 }, 917 { 918 .mask = cpu_to_be32(0xffff0000), /* POWER6 */ 919 .val = cpu_to_be32(0x003e0000), 920 }, 921 { 922 .mask = cpu_to_be32(0xffff0000), /* POWER7 */ 923 .val = cpu_to_be32(0x003f0000), 924 }, 925 { 926 .mask = cpu_to_be32(0xffff0000), /* POWER8E */ 927 .val = cpu_to_be32(0x004b0000), 928 }, 929 { 930 .mask = cpu_to_be32(0xffff0000), /* POWER8NVL */ 931 .val = cpu_to_be32(0x004c0000), 932 }, 933 { 934 .mask = cpu_to_be32(0xffff0000), /* POWER8 */ 935 .val = cpu_to_be32(0x004d0000), 936 }, 937 { 938 .mask = cpu_to_be32(0xffff0000), /* POWER9 */ 939 .val = cpu_to_be32(0x004e0000), 940 }, 941 { 942 .mask = cpu_to_be32(0xffffffff), /* all 3.00-compliant */ 943 .val = cpu_to_be32(0x0f000005), 944 }, 945 { 946 .mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */ 947 .val = cpu_to_be32(0x0f000004), 948 }, 949 { 950 .mask = cpu_to_be32(0xffffffff), /* all 2.06-compliant */ 951 .val = cpu_to_be32(0x0f000003), 952 }, 953 { 954 .mask = cpu_to_be32(0xffffffff), /* all 2.05-compliant */ 955 .val = cpu_to_be32(0x0f000002), 956 }, 957 { 958 .mask = cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */ 959 .val = cpu_to_be32(0x0f000001), 960 }, 961 }, 962 963 .num_vectors = NUM_VECTORS(6), 964 965 .vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)), 966 .vec1 = { 967 .byte1 = 0, 968 .arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 | 969 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07, 970 .arch_versions3 = OV1_PPC_3_00, 971 }, 972 973 .vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)), 974 /* option vector 2: Open Firmware options supported */ 975 .vec2 = { 976 .byte1 = OV2_REAL_MODE, 977 .reserved = 0, 978 .real_base = cpu_to_be32(0xffffffff), 979 .real_size = cpu_to_be32(0xffffffff), 980 .virt_base = cpu_to_be32(0xffffffff), 981 .virt_size = cpu_to_be32(0xffffffff), 982 .load_base = cpu_to_be32(0xffffffff), 983 .min_rma = cpu_to_be32(512), /* 512MB min RMA */ 984 .min_load = cpu_to_be32(0xffffffff), /* full client load */ 985 .min_rma_percent = 0, /* min RMA percentage of total RAM */ 986 .max_pft_size = 48, /* max log_2(hash table size) */ 987 }, 988 989 .vec3_len = VECTOR_LENGTH(sizeof(struct option_vector3)), 990 /* option vector 3: processor options supported */ 991 .vec3 = { 992 .byte1 = 0, /* don't ignore, don't halt */ 993 .byte2 = OV3_FP | OV3_VMX | OV3_DFP, 994 }, 995 996 .vec4_len = VECTOR_LENGTH(sizeof(struct option_vector4)), 997 /* option vector 4: IBM PAPR implementation */ 998 .vec4 = { 999 .byte1 = 0, /* don't halt */ 1000 .min_vp_cap = OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */ 1001 }, 1002 1003 .vec5_len = VECTOR_LENGTH(sizeof(struct option_vector5)), 1004 /* option vector 5: PAPR/OF options */ 1005 .vec5 = { 1006 .byte1 = 0, /* don't ignore, don't halt */ 1007 .byte2 = OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) | 1008 OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) | 1009 #ifdef CONFIG_PCI_MSI 1010 /* PCIe/MSI support. Without MSI full PCIe is not supported */ 1011 OV5_FEAT(OV5_MSI), 1012 #else 1013 0, 1014 #endif 1015 .byte3 = 0, 1016 .cmo = 1017 #ifdef CONFIG_PPC_SMLPAR 1018 OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO), 1019 #else 1020 0, 1021 #endif 1022 .associativity = OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN), 1023 .bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT), 1024 .micro_checkpoint = 0, 1025 .reserved0 = 0, 1026 .max_cpus = cpu_to_be32(NR_CPUS), /* number of cores supported */ 1027 .papr_level = 0, 1028 .reserved1 = 0, 1029 .platform_facilities = OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | OV5_FEAT(OV5_PFO_HW_842), 1030 .reserved2 = 0, 1031 .reserved3 = 0, 1032 .subprocessors = 1, 1033 .byte22 = OV5_FEAT(OV5_DRMEM_V2), 1034 .intarch = 0, 1035 .mmu = 0, 1036 .hash_ext = 0, 1037 .radix_ext = 0, 1038 }, 1039 1040 /* option vector 6: IBM PAPR hints */ 1041 .vec6_len = VECTOR_LENGTH(sizeof(struct option_vector6)), 1042 .vec6 = { 1043 .reserved = 0, 1044 .secondary_pteg = 0, 1045 .os_name = OV6_LINUX, 1046 }, 1047 }; 1048 1049 static struct ibm_arch_vec __prombss ibm_architecture_vec ____cacheline_aligned; 1050 1051 /* Old method - ELF header with PT_NOTE sections only works on BE */ 1052 #ifdef __BIG_ENDIAN__ 1053 static const struct fake_elf { 1054 Elf32_Ehdr elfhdr; 1055 Elf32_Phdr phdr[2]; 1056 struct chrpnote { 1057 u32 namesz; 1058 u32 descsz; 1059 u32 type; 1060 char name[8]; /* "PowerPC" */ 1061 struct chrpdesc { 1062 u32 real_mode; 1063 u32 real_base; 1064 u32 real_size; 1065 u32 virt_base; 1066 u32 virt_size; 1067 u32 load_base; 1068 } chrpdesc; 1069 } chrpnote; 1070 struct rpanote { 1071 u32 namesz; 1072 u32 descsz; 1073 u32 type; 1074 char name[24]; /* "IBM,RPA-Client-Config" */ 1075 struct rpadesc { 1076 u32 lpar_affinity; 1077 u32 min_rmo_size; 1078 u32 min_rmo_percent; 1079 u32 max_pft_size; 1080 u32 splpar; 1081 u32 min_load; 1082 u32 new_mem_def; 1083 u32 ignore_me; 1084 } rpadesc; 1085 } rpanote; 1086 } fake_elf __initconst = { 1087 .elfhdr = { 1088 .e_ident = { 0x7f, 'E', 'L', 'F', 1089 ELFCLASS32, ELFDATA2MSB, EV_CURRENT }, 1090 .e_type = ET_EXEC, /* yeah right */ 1091 .e_machine = EM_PPC, 1092 .e_version = EV_CURRENT, 1093 .e_phoff = offsetof(struct fake_elf, phdr), 1094 .e_phentsize = sizeof(Elf32_Phdr), 1095 .e_phnum = 2 1096 }, 1097 .phdr = { 1098 [0] = { 1099 .p_type = PT_NOTE, 1100 .p_offset = offsetof(struct fake_elf, chrpnote), 1101 .p_filesz = sizeof(struct chrpnote) 1102 }, [1] = { 1103 .p_type = PT_NOTE, 1104 .p_offset = offsetof(struct fake_elf, rpanote), 1105 .p_filesz = sizeof(struct rpanote) 1106 } 1107 }, 1108 .chrpnote = { 1109 .namesz = sizeof("PowerPC"), 1110 .descsz = sizeof(struct chrpdesc), 1111 .type = 0x1275, 1112 .name = "PowerPC", 1113 .chrpdesc = { 1114 .real_mode = ~0U, /* ~0 means "don't care" */ 1115 .real_base = ~0U, 1116 .real_size = ~0U, 1117 .virt_base = ~0U, 1118 .virt_size = ~0U, 1119 .load_base = ~0U 1120 }, 1121 }, 1122 .rpanote = { 1123 .namesz = sizeof("IBM,RPA-Client-Config"), 1124 .descsz = sizeof(struct rpadesc), 1125 .type = 0x12759999, 1126 .name = "IBM,RPA-Client-Config", 1127 .rpadesc = { 1128 .lpar_affinity = 0, 1129 .min_rmo_size = 64, /* in megabytes */ 1130 .min_rmo_percent = 0, 1131 .max_pft_size = 48, /* 2^48 bytes max PFT size */ 1132 .splpar = 1, 1133 .min_load = ~0U, 1134 .new_mem_def = 0 1135 } 1136 } 1137 }; 1138 #endif /* __BIG_ENDIAN__ */ 1139 1140 static int __init prom_count_smt_threads(void) 1141 { 1142 phandle node; 1143 char type[64]; 1144 unsigned int plen; 1145 1146 /* Pick up th first CPU node we can find */ 1147 for (node = 0; prom_next_node(&node); ) { 1148 type[0] = 0; 1149 prom_getprop(node, "device_type", type, sizeof(type)); 1150 1151 if (prom_strcmp(type, "cpu")) 1152 continue; 1153 /* 1154 * There is an entry for each smt thread, each entry being 1155 * 4 bytes long. All cpus should have the same number of 1156 * smt threads, so return after finding the first. 1157 */ 1158 plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s"); 1159 if (plen == PROM_ERROR) 1160 break; 1161 plen >>= 2; 1162 prom_debug("Found %lu smt threads per core\n", (unsigned long)plen); 1163 1164 /* Sanity check */ 1165 if (plen < 1 || plen > 64) { 1166 prom_printf("Threads per core %lu out of bounds, assuming 1\n", 1167 (unsigned long)plen); 1168 return 1; 1169 } 1170 return plen; 1171 } 1172 prom_debug("No threads found, assuming 1 per core\n"); 1173 1174 return 1; 1175 1176 } 1177 1178 static void __init prom_parse_mmu_model(u8 val, 1179 struct platform_support *support) 1180 { 1181 switch (val) { 1182 case OV5_FEAT(OV5_MMU_DYNAMIC): 1183 case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */ 1184 prom_debug("MMU - either supported\n"); 1185 support->radix_mmu = !prom_radix_disable; 1186 support->hash_mmu = true; 1187 break; 1188 case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */ 1189 prom_debug("MMU - radix only\n"); 1190 if (prom_radix_disable) { 1191 /* 1192 * If we __have__ to do radix, we're better off ignoring 1193 * the command line rather than not booting. 1194 */ 1195 prom_printf("WARNING: Ignoring cmdline option disable_radix\n"); 1196 } 1197 support->radix_mmu = true; 1198 break; 1199 case OV5_FEAT(OV5_MMU_HASH): 1200 prom_debug("MMU - hash only\n"); 1201 support->hash_mmu = true; 1202 break; 1203 default: 1204 prom_debug("Unknown mmu support option: 0x%x\n", val); 1205 break; 1206 } 1207 } 1208 1209 static void __init prom_parse_xive_model(u8 val, 1210 struct platform_support *support) 1211 { 1212 switch (val) { 1213 case OV5_FEAT(OV5_XIVE_EITHER): /* Either Available */ 1214 prom_debug("XIVE - either mode supported\n"); 1215 support->xive = true; 1216 break; 1217 case OV5_FEAT(OV5_XIVE_EXPLOIT): /* Only Exploitation mode */ 1218 prom_debug("XIVE - exploitation mode supported\n"); 1219 support->xive = true; 1220 break; 1221 case OV5_FEAT(OV5_XIVE_LEGACY): /* Only Legacy mode */ 1222 prom_debug("XIVE - legacy mode supported\n"); 1223 break; 1224 default: 1225 prom_debug("Unknown xive support option: 0x%x\n", val); 1226 break; 1227 } 1228 } 1229 1230 static void __init prom_parse_platform_support(u8 index, u8 val, 1231 struct platform_support *support) 1232 { 1233 switch (index) { 1234 case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */ 1235 prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support); 1236 break; 1237 case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */ 1238 if (val & OV5_FEAT(OV5_RADIX_GTSE)) { 1239 prom_debug("Radix - GTSE supported\n"); 1240 support->radix_gtse = true; 1241 } 1242 break; 1243 case OV5_INDX(OV5_XIVE_SUPPORT): /* Interrupt mode */ 1244 prom_parse_xive_model(val & OV5_FEAT(OV5_XIVE_SUPPORT), 1245 support); 1246 break; 1247 } 1248 } 1249 1250 static void __init prom_check_platform_support(void) 1251 { 1252 struct platform_support supported = { 1253 .hash_mmu = false, 1254 .radix_mmu = false, 1255 .radix_gtse = false, 1256 .xive = false 1257 }; 1258 int prop_len = prom_getproplen(prom.chosen, 1259 "ibm,arch-vec-5-platform-support"); 1260 1261 /* 1262 * First copy the architecture vec template 1263 * 1264 * use memcpy() instead of *vec = *vec_template so that GCC replaces it 1265 * by __memcpy() when KASAN is active 1266 */ 1267 memcpy(&ibm_architecture_vec, &ibm_architecture_vec_template, 1268 sizeof(ibm_architecture_vec)); 1269 1270 if (prop_len > 1) { 1271 int i; 1272 u8 vec[8]; 1273 prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n", 1274 prop_len); 1275 if (prop_len > sizeof(vec)) 1276 prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n", 1277 prop_len); 1278 prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", 1279 &vec, sizeof(vec)); 1280 for (i = 0; i < sizeof(vec); i += 2) { 1281 prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2 1282 , vec[i] 1283 , vec[i + 1]); 1284 prom_parse_platform_support(vec[i], vec[i + 1], 1285 &supported); 1286 } 1287 } 1288 1289 if (supported.radix_mmu && supported.radix_gtse && 1290 IS_ENABLED(CONFIG_PPC_RADIX_MMU)) { 1291 /* Radix preferred - but we require GTSE for now */ 1292 prom_debug("Asking for radix with GTSE\n"); 1293 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX); 1294 ibm_architecture_vec.vec5.radix_ext = OV5_FEAT(OV5_RADIX_GTSE); 1295 } else if (supported.hash_mmu) { 1296 /* Default to hash mmu (if we can) */ 1297 prom_debug("Asking for hash\n"); 1298 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH); 1299 } else { 1300 /* We're probably on a legacy hypervisor */ 1301 prom_debug("Assuming legacy hash support\n"); 1302 } 1303 1304 if (supported.xive) { 1305 prom_debug("Asking for XIVE\n"); 1306 ibm_architecture_vec.vec5.intarch = OV5_FEAT(OV5_XIVE_EXPLOIT); 1307 } 1308 } 1309 1310 static void __init prom_send_capabilities(void) 1311 { 1312 ihandle root; 1313 prom_arg_t ret; 1314 u32 cores; 1315 1316 /* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */ 1317 prom_check_platform_support(); 1318 1319 root = call_prom("open", 1, 1, ADDR("/")); 1320 if (root != 0) { 1321 /* We need to tell the FW about the number of cores we support. 1322 * 1323 * To do that, we count the number of threads on the first core 1324 * (we assume this is the same for all cores) and use it to 1325 * divide NR_CPUS. 1326 */ 1327 1328 cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads()); 1329 prom_printf("Max number of cores passed to firmware: %u (NR_CPUS = %d)\n", 1330 cores, NR_CPUS); 1331 1332 ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores); 1333 1334 /* try calling the ibm,client-architecture-support method */ 1335 prom_printf("Calling ibm,client-architecture-support..."); 1336 if (call_prom_ret("call-method", 3, 2, &ret, 1337 ADDR("ibm,client-architecture-support"), 1338 root, 1339 ADDR(&ibm_architecture_vec)) == 0) { 1340 /* the call exists... */ 1341 if (ret) 1342 prom_printf("\nWARNING: ibm,client-architecture" 1343 "-support call FAILED!\n"); 1344 call_prom("close", 1, 0, root); 1345 prom_printf(" done\n"); 1346 return; 1347 } 1348 call_prom("close", 1, 0, root); 1349 prom_printf(" not implemented\n"); 1350 } 1351 1352 #ifdef __BIG_ENDIAN__ 1353 { 1354 ihandle elfloader; 1355 1356 /* no ibm,client-architecture-support call, try the old way */ 1357 elfloader = call_prom("open", 1, 1, 1358 ADDR("/packages/elf-loader")); 1359 if (elfloader == 0) { 1360 prom_printf("couldn't open /packages/elf-loader\n"); 1361 return; 1362 } 1363 call_prom("call-method", 3, 1, ADDR("process-elf-header"), 1364 elfloader, ADDR(&fake_elf)); 1365 call_prom("close", 1, 0, elfloader); 1366 } 1367 #endif /* __BIG_ENDIAN__ */ 1368 } 1369 #endif /* CONFIG_PPC_PSERIES */ 1370 1371 /* 1372 * Memory allocation strategy... our layout is normally: 1373 * 1374 * at 14Mb or more we have vmlinux, then a gap and initrd. In some 1375 * rare cases, initrd might end up being before the kernel though. 1376 * We assume this won't override the final kernel at 0, we have no 1377 * provision to handle that in this version, but it should hopefully 1378 * never happen. 1379 * 1380 * alloc_top is set to the top of RMO, eventually shrink down if the 1381 * TCEs overlap 1382 * 1383 * alloc_bottom is set to the top of kernel/initrd 1384 * 1385 * from there, allocations are done this way : rtas is allocated 1386 * topmost, and the device-tree is allocated from the bottom. We try 1387 * to grow the device-tree allocation as we progress. If we can't, 1388 * then we fail, we don't currently have a facility to restart 1389 * elsewhere, but that shouldn't be necessary. 1390 * 1391 * Note that calls to reserve_mem have to be done explicitly, memory 1392 * allocated with either alloc_up or alloc_down isn't automatically 1393 * reserved. 1394 */ 1395 1396 1397 /* 1398 * Allocates memory in the RMO upward from the kernel/initrd 1399 * 1400 * When align is 0, this is a special case, it means to allocate in place 1401 * at the current location of alloc_bottom or fail (that is basically 1402 * extending the previous allocation). Used for the device-tree flattening 1403 */ 1404 static unsigned long __init alloc_up(unsigned long size, unsigned long align) 1405 { 1406 unsigned long base = alloc_bottom; 1407 unsigned long addr = 0; 1408 1409 if (align) 1410 base = _ALIGN_UP(base, align); 1411 prom_debug("%s(%lx, %lx)\n", __func__, size, align); 1412 if (ram_top == 0) 1413 prom_panic("alloc_up() called with mem not initialized\n"); 1414 1415 if (align) 1416 base = _ALIGN_UP(alloc_bottom, align); 1417 else 1418 base = alloc_bottom; 1419 1420 for(; (base + size) <= alloc_top; 1421 base = _ALIGN_UP(base + 0x100000, align)) { 1422 prom_debug(" trying: 0x%lx\n\r", base); 1423 addr = (unsigned long)prom_claim(base, size, 0); 1424 if (addr != PROM_ERROR && addr != 0) 1425 break; 1426 addr = 0; 1427 if (align == 0) 1428 break; 1429 } 1430 if (addr == 0) 1431 return 0; 1432 alloc_bottom = addr + size; 1433 1434 prom_debug(" -> %lx\n", addr); 1435 prom_debug(" alloc_bottom : %lx\n", alloc_bottom); 1436 prom_debug(" alloc_top : %lx\n", alloc_top); 1437 prom_debug(" alloc_top_hi : %lx\n", alloc_top_high); 1438 prom_debug(" rmo_top : %lx\n", rmo_top); 1439 prom_debug(" ram_top : %lx\n", ram_top); 1440 1441 return addr; 1442 } 1443 1444 /* 1445 * Allocates memory downward, either from top of RMO, or if highmem 1446 * is set, from the top of RAM. Note that this one doesn't handle 1447 * failures. It does claim memory if highmem is not set. 1448 */ 1449 static unsigned long __init alloc_down(unsigned long size, unsigned long align, 1450 int highmem) 1451 { 1452 unsigned long base, addr = 0; 1453 1454 prom_debug("%s(%lx, %lx, %s)\n", __func__, size, align, 1455 highmem ? "(high)" : "(low)"); 1456 if (ram_top == 0) 1457 prom_panic("alloc_down() called with mem not initialized\n"); 1458 1459 if (highmem) { 1460 /* Carve out storage for the TCE table. */ 1461 addr = _ALIGN_DOWN(alloc_top_high - size, align); 1462 if (addr <= alloc_bottom) 1463 return 0; 1464 /* Will we bump into the RMO ? If yes, check out that we 1465 * didn't overlap existing allocations there, if we did, 1466 * we are dead, we must be the first in town ! 1467 */ 1468 if (addr < rmo_top) { 1469 /* Good, we are first */ 1470 if (alloc_top == rmo_top) 1471 alloc_top = rmo_top = addr; 1472 else 1473 return 0; 1474 } 1475 alloc_top_high = addr; 1476 goto bail; 1477 } 1478 1479 base = _ALIGN_DOWN(alloc_top - size, align); 1480 for (; base > alloc_bottom; 1481 base = _ALIGN_DOWN(base - 0x100000, align)) { 1482 prom_debug(" trying: 0x%lx\n\r", base); 1483 addr = (unsigned long)prom_claim(base, size, 0); 1484 if (addr != PROM_ERROR && addr != 0) 1485 break; 1486 addr = 0; 1487 } 1488 if (addr == 0) 1489 return 0; 1490 alloc_top = addr; 1491 1492 bail: 1493 prom_debug(" -> %lx\n", addr); 1494 prom_debug(" alloc_bottom : %lx\n", alloc_bottom); 1495 prom_debug(" alloc_top : %lx\n", alloc_top); 1496 prom_debug(" alloc_top_hi : %lx\n", alloc_top_high); 1497 prom_debug(" rmo_top : %lx\n", rmo_top); 1498 prom_debug(" ram_top : %lx\n", ram_top); 1499 1500 return addr; 1501 } 1502 1503 /* 1504 * Parse a "reg" cell 1505 */ 1506 static unsigned long __init prom_next_cell(int s, cell_t **cellp) 1507 { 1508 cell_t *p = *cellp; 1509 unsigned long r = 0; 1510 1511 /* Ignore more than 2 cells */ 1512 while (s > sizeof(unsigned long) / 4) { 1513 p++; 1514 s--; 1515 } 1516 r = be32_to_cpu(*p++); 1517 #ifdef CONFIG_PPC64 1518 if (s > 1) { 1519 r <<= 32; 1520 r |= be32_to_cpu(*(p++)); 1521 } 1522 #endif 1523 *cellp = p; 1524 return r; 1525 } 1526 1527 /* 1528 * Very dumb function for adding to the memory reserve list, but 1529 * we don't need anything smarter at this point 1530 * 1531 * XXX Eventually check for collisions. They should NEVER happen. 1532 * If problems seem to show up, it would be a good start to track 1533 * them down. 1534 */ 1535 static void __init reserve_mem(u64 base, u64 size) 1536 { 1537 u64 top = base + size; 1538 unsigned long cnt = mem_reserve_cnt; 1539 1540 if (size == 0) 1541 return; 1542 1543 /* We need to always keep one empty entry so that we 1544 * have our terminator with "size" set to 0 since we are 1545 * dumb and just copy this entire array to the boot params 1546 */ 1547 base = _ALIGN_DOWN(base, PAGE_SIZE); 1548 top = _ALIGN_UP(top, PAGE_SIZE); 1549 size = top - base; 1550 1551 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1)) 1552 prom_panic("Memory reserve map exhausted !\n"); 1553 mem_reserve_map[cnt].base = cpu_to_be64(base); 1554 mem_reserve_map[cnt].size = cpu_to_be64(size); 1555 mem_reserve_cnt = cnt + 1; 1556 } 1557 1558 /* 1559 * Initialize memory allocation mechanism, parse "memory" nodes and 1560 * obtain that way the top of memory and RMO to setup out local allocator 1561 */ 1562 static void __init prom_init_mem(void) 1563 { 1564 phandle node; 1565 #ifdef DEBUG_PROM 1566 char *path; 1567 #endif 1568 char type[64]; 1569 unsigned int plen; 1570 cell_t *p, *endp; 1571 __be32 val; 1572 u32 rac, rsc; 1573 1574 /* 1575 * We iterate the memory nodes to find 1576 * 1) top of RMO (first node) 1577 * 2) top of memory 1578 */ 1579 val = cpu_to_be32(2); 1580 prom_getprop(prom.root, "#address-cells", &val, sizeof(val)); 1581 rac = be32_to_cpu(val); 1582 val = cpu_to_be32(1); 1583 prom_getprop(prom.root, "#size-cells", &val, sizeof(rsc)); 1584 rsc = be32_to_cpu(val); 1585 prom_debug("root_addr_cells: %x\n", rac); 1586 prom_debug("root_size_cells: %x\n", rsc); 1587 1588 prom_debug("scanning memory:\n"); 1589 #ifdef DEBUG_PROM 1590 path = prom_scratch; 1591 #endif 1592 1593 for (node = 0; prom_next_node(&node); ) { 1594 type[0] = 0; 1595 prom_getprop(node, "device_type", type, sizeof(type)); 1596 1597 if (type[0] == 0) { 1598 /* 1599 * CHRP Longtrail machines have no device_type 1600 * on the memory node, so check the name instead... 1601 */ 1602 prom_getprop(node, "name", type, sizeof(type)); 1603 } 1604 if (prom_strcmp(type, "memory")) 1605 continue; 1606 1607 plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf)); 1608 if (plen > sizeof(regbuf)) { 1609 prom_printf("memory node too large for buffer !\n"); 1610 plen = sizeof(regbuf); 1611 } 1612 p = regbuf; 1613 endp = p + (plen / sizeof(cell_t)); 1614 1615 #ifdef DEBUG_PROM 1616 memset(path, 0, sizeof(prom_scratch)); 1617 call_prom("package-to-path", 3, 1, node, path, sizeof(prom_scratch) - 1); 1618 prom_debug(" node %s :\n", path); 1619 #endif /* DEBUG_PROM */ 1620 1621 while ((endp - p) >= (rac + rsc)) { 1622 unsigned long base, size; 1623 1624 base = prom_next_cell(rac, &p); 1625 size = prom_next_cell(rsc, &p); 1626 1627 if (size == 0) 1628 continue; 1629 prom_debug(" %lx %lx\n", base, size); 1630 if (base == 0 && (of_platform & PLATFORM_LPAR)) 1631 rmo_top = size; 1632 if ((base + size) > ram_top) 1633 ram_top = base + size; 1634 } 1635 } 1636 1637 alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000); 1638 1639 /* 1640 * If prom_memory_limit is set we reduce the upper limits *except* for 1641 * alloc_top_high. This must be the real top of RAM so we can put 1642 * TCE's up there. 1643 */ 1644 1645 alloc_top_high = ram_top; 1646 1647 if (prom_memory_limit) { 1648 if (prom_memory_limit <= alloc_bottom) { 1649 prom_printf("Ignoring mem=%lx <= alloc_bottom.\n", 1650 prom_memory_limit); 1651 prom_memory_limit = 0; 1652 } else if (prom_memory_limit >= ram_top) { 1653 prom_printf("Ignoring mem=%lx >= ram_top.\n", 1654 prom_memory_limit); 1655 prom_memory_limit = 0; 1656 } else { 1657 ram_top = prom_memory_limit; 1658 rmo_top = min(rmo_top, prom_memory_limit); 1659 } 1660 } 1661 1662 /* 1663 * Setup our top alloc point, that is top of RMO or top of 1664 * segment 0 when running non-LPAR. 1665 * Some RS64 machines have buggy firmware where claims up at 1666 * 1GB fail. Cap at 768MB as a workaround. 1667 * Since 768MB is plenty of room, and we need to cap to something 1668 * reasonable on 32-bit, cap at 768MB on all machines. 1669 */ 1670 if (!rmo_top) 1671 rmo_top = ram_top; 1672 rmo_top = min(0x30000000ul, rmo_top); 1673 alloc_top = rmo_top; 1674 alloc_top_high = ram_top; 1675 1676 /* 1677 * Check if we have an initrd after the kernel but still inside 1678 * the RMO. If we do move our bottom point to after it. 1679 */ 1680 if (prom_initrd_start && 1681 prom_initrd_start < rmo_top && 1682 prom_initrd_end > alloc_bottom) 1683 alloc_bottom = PAGE_ALIGN(prom_initrd_end); 1684 1685 prom_printf("memory layout at init:\n"); 1686 prom_printf(" memory_limit : %lx (16 MB aligned)\n", 1687 prom_memory_limit); 1688 prom_printf(" alloc_bottom : %lx\n", alloc_bottom); 1689 prom_printf(" alloc_top : %lx\n", alloc_top); 1690 prom_printf(" alloc_top_hi : %lx\n", alloc_top_high); 1691 prom_printf(" rmo_top : %lx\n", rmo_top); 1692 prom_printf(" ram_top : %lx\n", ram_top); 1693 } 1694 1695 static void __init prom_close_stdin(void) 1696 { 1697 __be32 val; 1698 ihandle stdin; 1699 1700 if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) { 1701 stdin = be32_to_cpu(val); 1702 call_prom("close", 1, 0, stdin); 1703 } 1704 } 1705 1706 /* 1707 * Allocate room for and instantiate RTAS 1708 */ 1709 static void __init prom_instantiate_rtas(void) 1710 { 1711 phandle rtas_node; 1712 ihandle rtas_inst; 1713 u32 base, entry = 0; 1714 __be32 val; 1715 u32 size = 0; 1716 1717 prom_debug("prom_instantiate_rtas: start...\n"); 1718 1719 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas")); 1720 prom_debug("rtas_node: %x\n", rtas_node); 1721 if (!PHANDLE_VALID(rtas_node)) 1722 return; 1723 1724 val = 0; 1725 prom_getprop(rtas_node, "rtas-size", &val, sizeof(size)); 1726 size = be32_to_cpu(val); 1727 if (size == 0) 1728 return; 1729 1730 base = alloc_down(size, PAGE_SIZE, 0); 1731 if (base == 0) 1732 prom_panic("Could not allocate memory for RTAS\n"); 1733 1734 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas")); 1735 if (!IHANDLE_VALID(rtas_inst)) { 1736 prom_printf("opening rtas package failed (%x)\n", rtas_inst); 1737 return; 1738 } 1739 1740 prom_printf("instantiating rtas at 0x%x...", base); 1741 1742 if (call_prom_ret("call-method", 3, 2, &entry, 1743 ADDR("instantiate-rtas"), 1744 rtas_inst, base) != 0 1745 || entry == 0) { 1746 prom_printf(" failed\n"); 1747 return; 1748 } 1749 prom_printf(" done\n"); 1750 1751 reserve_mem(base, size); 1752 1753 val = cpu_to_be32(base); 1754 prom_setprop(rtas_node, "/rtas", "linux,rtas-base", 1755 &val, sizeof(val)); 1756 val = cpu_to_be32(entry); 1757 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry", 1758 &val, sizeof(val)); 1759 1760 /* Check if it supports "query-cpu-stopped-state" */ 1761 if (prom_getprop(rtas_node, "query-cpu-stopped-state", 1762 &val, sizeof(val)) != PROM_ERROR) 1763 rtas_has_query_cpu_stopped = true; 1764 1765 prom_debug("rtas base = 0x%x\n", base); 1766 prom_debug("rtas entry = 0x%x\n", entry); 1767 prom_debug("rtas size = 0x%x\n", size); 1768 1769 prom_debug("prom_instantiate_rtas: end...\n"); 1770 } 1771 1772 #ifdef CONFIG_PPC64 1773 /* 1774 * Allocate room for and instantiate Stored Measurement Log (SML) 1775 */ 1776 static void __init prom_instantiate_sml(void) 1777 { 1778 phandle ibmvtpm_node; 1779 ihandle ibmvtpm_inst; 1780 u32 entry = 0, size = 0, succ = 0; 1781 u64 base; 1782 __be32 val; 1783 1784 prom_debug("prom_instantiate_sml: start...\n"); 1785 1786 ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm")); 1787 prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node); 1788 if (!PHANDLE_VALID(ibmvtpm_node)) 1789 return; 1790 1791 ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm")); 1792 if (!IHANDLE_VALID(ibmvtpm_inst)) { 1793 prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst); 1794 return; 1795 } 1796 1797 if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported", 1798 &val, sizeof(val)) != PROM_ERROR) { 1799 if (call_prom_ret("call-method", 2, 2, &succ, 1800 ADDR("reformat-sml-to-efi-alignment"), 1801 ibmvtpm_inst) != 0 || succ == 0) { 1802 prom_printf("Reformat SML to EFI alignment failed\n"); 1803 return; 1804 } 1805 1806 if (call_prom_ret("call-method", 2, 2, &size, 1807 ADDR("sml-get-allocated-size"), 1808 ibmvtpm_inst) != 0 || size == 0) { 1809 prom_printf("SML get allocated size failed\n"); 1810 return; 1811 } 1812 } else { 1813 if (call_prom_ret("call-method", 2, 2, &size, 1814 ADDR("sml-get-handover-size"), 1815 ibmvtpm_inst) != 0 || size == 0) { 1816 prom_printf("SML get handover size failed\n"); 1817 return; 1818 } 1819 } 1820 1821 base = alloc_down(size, PAGE_SIZE, 0); 1822 if (base == 0) 1823 prom_panic("Could not allocate memory for sml\n"); 1824 1825 prom_printf("instantiating sml at 0x%llx...", base); 1826 1827 memset((void *)base, 0, size); 1828 1829 if (call_prom_ret("call-method", 4, 2, &entry, 1830 ADDR("sml-handover"), 1831 ibmvtpm_inst, size, base) != 0 || entry == 0) { 1832 prom_printf("SML handover failed\n"); 1833 return; 1834 } 1835 prom_printf(" done\n"); 1836 1837 reserve_mem(base, size); 1838 1839 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base", 1840 &base, sizeof(base)); 1841 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size", 1842 &size, sizeof(size)); 1843 1844 prom_debug("sml base = 0x%llx\n", base); 1845 prom_debug("sml size = 0x%x\n", size); 1846 1847 prom_debug("prom_instantiate_sml: end...\n"); 1848 } 1849 1850 /* 1851 * Allocate room for and initialize TCE tables 1852 */ 1853 #ifdef __BIG_ENDIAN__ 1854 static void __init prom_initialize_tce_table(void) 1855 { 1856 phandle node; 1857 ihandle phb_node; 1858 char compatible[64], type[64], model[64]; 1859 char *path = prom_scratch; 1860 u64 base, align; 1861 u32 minalign, minsize; 1862 u64 tce_entry, *tce_entryp; 1863 u64 local_alloc_top, local_alloc_bottom; 1864 u64 i; 1865 1866 if (prom_iommu_off) 1867 return; 1868 1869 prom_debug("starting prom_initialize_tce_table\n"); 1870 1871 /* Cache current top of allocs so we reserve a single block */ 1872 local_alloc_top = alloc_top_high; 1873 local_alloc_bottom = local_alloc_top; 1874 1875 /* Search all nodes looking for PHBs. */ 1876 for (node = 0; prom_next_node(&node); ) { 1877 compatible[0] = 0; 1878 type[0] = 0; 1879 model[0] = 0; 1880 prom_getprop(node, "compatible", 1881 compatible, sizeof(compatible)); 1882 prom_getprop(node, "device_type", type, sizeof(type)); 1883 prom_getprop(node, "model", model, sizeof(model)); 1884 1885 if ((type[0] == 0) || (prom_strstr(type, "pci") == NULL)) 1886 continue; 1887 1888 /* Keep the old logic intact to avoid regression. */ 1889 if (compatible[0] != 0) { 1890 if ((prom_strstr(compatible, "python") == NULL) && 1891 (prom_strstr(compatible, "Speedwagon") == NULL) && 1892 (prom_strstr(compatible, "Winnipeg") == NULL)) 1893 continue; 1894 } else if (model[0] != 0) { 1895 if ((prom_strstr(model, "ython") == NULL) && 1896 (prom_strstr(model, "peedwagon") == NULL) && 1897 (prom_strstr(model, "innipeg") == NULL)) 1898 continue; 1899 } 1900 1901 if (prom_getprop(node, "tce-table-minalign", &minalign, 1902 sizeof(minalign)) == PROM_ERROR) 1903 minalign = 0; 1904 if (prom_getprop(node, "tce-table-minsize", &minsize, 1905 sizeof(minsize)) == PROM_ERROR) 1906 minsize = 4UL << 20; 1907 1908 /* 1909 * Even though we read what OF wants, we just set the table 1910 * size to 4 MB. This is enough to map 2GB of PCI DMA space. 1911 * By doing this, we avoid the pitfalls of trying to DMA to 1912 * MMIO space and the DMA alias hole. 1913 */ 1914 minsize = 4UL << 20; 1915 1916 /* Align to the greater of the align or size */ 1917 align = max(minalign, minsize); 1918 base = alloc_down(minsize, align, 1); 1919 if (base == 0) 1920 prom_panic("ERROR, cannot find space for TCE table.\n"); 1921 if (base < local_alloc_bottom) 1922 local_alloc_bottom = base; 1923 1924 /* It seems OF doesn't null-terminate the path :-( */ 1925 memset(path, 0, sizeof(prom_scratch)); 1926 /* Call OF to setup the TCE hardware */ 1927 if (call_prom("package-to-path", 3, 1, node, 1928 path, sizeof(prom_scratch) - 1) == PROM_ERROR) { 1929 prom_printf("package-to-path failed\n"); 1930 } 1931 1932 /* Save away the TCE table attributes for later use. */ 1933 prom_setprop(node, path, "linux,tce-base", &base, sizeof(base)); 1934 prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize)); 1935 1936 prom_debug("TCE table: %s\n", path); 1937 prom_debug("\tnode = 0x%x\n", node); 1938 prom_debug("\tbase = 0x%llx\n", base); 1939 prom_debug("\tsize = 0x%x\n", minsize); 1940 1941 /* Initialize the table to have a one-to-one mapping 1942 * over the allocated size. 1943 */ 1944 tce_entryp = (u64 *)base; 1945 for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) { 1946 tce_entry = (i << PAGE_SHIFT); 1947 tce_entry |= 0x3; 1948 *tce_entryp = tce_entry; 1949 } 1950 1951 prom_printf("opening PHB %s", path); 1952 phb_node = call_prom("open", 1, 1, path); 1953 if (phb_node == 0) 1954 prom_printf("... failed\n"); 1955 else 1956 prom_printf("... done\n"); 1957 1958 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"), 1959 phb_node, -1, minsize, 1960 (u32) base, (u32) (base >> 32)); 1961 call_prom("close", 1, 0, phb_node); 1962 } 1963 1964 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom); 1965 1966 /* These are only really needed if there is a memory limit in 1967 * effect, but we don't know so export them always. */ 1968 prom_tce_alloc_start = local_alloc_bottom; 1969 prom_tce_alloc_end = local_alloc_top; 1970 1971 /* Flag the first invalid entry */ 1972 prom_debug("ending prom_initialize_tce_table\n"); 1973 } 1974 #endif /* __BIG_ENDIAN__ */ 1975 #endif /* CONFIG_PPC64 */ 1976 1977 /* 1978 * With CHRP SMP we need to use the OF to start the other processors. 1979 * We can't wait until smp_boot_cpus (the OF is trashed by then) 1980 * so we have to put the processors into a holding pattern controlled 1981 * by the kernel (not OF) before we destroy the OF. 1982 * 1983 * This uses a chunk of low memory, puts some holding pattern 1984 * code there and sends the other processors off to there until 1985 * smp_boot_cpus tells them to do something. The holding pattern 1986 * checks that address until its cpu # is there, when it is that 1987 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care 1988 * of setting those values. 1989 * 1990 * We also use physical address 0x4 here to tell when a cpu 1991 * is in its holding pattern code. 1992 * 1993 * -- Cort 1994 */ 1995 /* 1996 * We want to reference the copy of __secondary_hold_* in the 1997 * 0 - 0x100 address range 1998 */ 1999 #define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff) 2000 2001 static void __init prom_hold_cpus(void) 2002 { 2003 unsigned long i; 2004 phandle node; 2005 char type[64]; 2006 unsigned long *spinloop 2007 = (void *) LOW_ADDR(__secondary_hold_spinloop); 2008 unsigned long *acknowledge 2009 = (void *) LOW_ADDR(__secondary_hold_acknowledge); 2010 unsigned long secondary_hold = LOW_ADDR(__secondary_hold); 2011 2012 /* 2013 * On pseries, if RTAS supports "query-cpu-stopped-state", 2014 * we skip this stage, the CPUs will be started by the 2015 * kernel using RTAS. 2016 */ 2017 if ((of_platform == PLATFORM_PSERIES || 2018 of_platform == PLATFORM_PSERIES_LPAR) && 2019 rtas_has_query_cpu_stopped) { 2020 prom_printf("prom_hold_cpus: skipped\n"); 2021 return; 2022 } 2023 2024 prom_debug("prom_hold_cpus: start...\n"); 2025 prom_debug(" 1) spinloop = 0x%lx\n", (unsigned long)spinloop); 2026 prom_debug(" 1) *spinloop = 0x%lx\n", *spinloop); 2027 prom_debug(" 1) acknowledge = 0x%lx\n", 2028 (unsigned long)acknowledge); 2029 prom_debug(" 1) *acknowledge = 0x%lx\n", *acknowledge); 2030 prom_debug(" 1) secondary_hold = 0x%lx\n", secondary_hold); 2031 2032 /* Set the common spinloop variable, so all of the secondary cpus 2033 * will block when they are awakened from their OF spinloop. 2034 * This must occur for both SMP and non SMP kernels, since OF will 2035 * be trashed when we move the kernel. 2036 */ 2037 *spinloop = 0; 2038 2039 /* look for cpus */ 2040 for (node = 0; prom_next_node(&node); ) { 2041 unsigned int cpu_no; 2042 __be32 reg; 2043 2044 type[0] = 0; 2045 prom_getprop(node, "device_type", type, sizeof(type)); 2046 if (prom_strcmp(type, "cpu") != 0) 2047 continue; 2048 2049 /* Skip non-configured cpus. */ 2050 if (prom_getprop(node, "status", type, sizeof(type)) > 0) 2051 if (prom_strcmp(type, "okay") != 0) 2052 continue; 2053 2054 reg = cpu_to_be32(-1); /* make sparse happy */ 2055 prom_getprop(node, "reg", ®, sizeof(reg)); 2056 cpu_no = be32_to_cpu(reg); 2057 2058 prom_debug("cpu hw idx = %u\n", cpu_no); 2059 2060 /* Init the acknowledge var which will be reset by 2061 * the secondary cpu when it awakens from its OF 2062 * spinloop. 2063 */ 2064 *acknowledge = (unsigned long)-1; 2065 2066 if (cpu_no != prom.cpu) { 2067 /* Primary Thread of non-boot cpu or any thread */ 2068 prom_printf("starting cpu hw idx %u... ", cpu_no); 2069 call_prom("start-cpu", 3, 0, node, 2070 secondary_hold, cpu_no); 2071 2072 for (i = 0; (i < 100000000) && 2073 (*acknowledge == ((unsigned long)-1)); i++ ) 2074 mb(); 2075 2076 if (*acknowledge == cpu_no) 2077 prom_printf("done\n"); 2078 else 2079 prom_printf("failed: %lx\n", *acknowledge); 2080 } 2081 #ifdef CONFIG_SMP 2082 else 2083 prom_printf("boot cpu hw idx %u\n", cpu_no); 2084 #endif /* CONFIG_SMP */ 2085 } 2086 2087 prom_debug("prom_hold_cpus: end...\n"); 2088 } 2089 2090 2091 static void __init prom_init_client_services(unsigned long pp) 2092 { 2093 /* Get a handle to the prom entry point before anything else */ 2094 prom_entry = pp; 2095 2096 /* get a handle for the stdout device */ 2097 prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen")); 2098 if (!PHANDLE_VALID(prom.chosen)) 2099 prom_panic("cannot find chosen"); /* msg won't be printed :( */ 2100 2101 /* get device tree root */ 2102 prom.root = call_prom("finddevice", 1, 1, ADDR("/")); 2103 if (!PHANDLE_VALID(prom.root)) 2104 prom_panic("cannot find device tree root"); /* msg won't be printed :( */ 2105 2106 prom.mmumap = 0; 2107 } 2108 2109 #ifdef CONFIG_PPC32 2110 /* 2111 * For really old powermacs, we need to map things we claim. 2112 * For that, we need the ihandle of the mmu. 2113 * Also, on the longtrail, we need to work around other bugs. 2114 */ 2115 static void __init prom_find_mmu(void) 2116 { 2117 phandle oprom; 2118 char version[64]; 2119 2120 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom")); 2121 if (!PHANDLE_VALID(oprom)) 2122 return; 2123 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0) 2124 return; 2125 version[sizeof(version) - 1] = 0; 2126 /* XXX might need to add other versions here */ 2127 if (prom_strcmp(version, "Open Firmware, 1.0.5") == 0) 2128 of_workarounds = OF_WA_CLAIM; 2129 else if (prom_strncmp(version, "FirmWorks,3.", 12) == 0) { 2130 of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL; 2131 call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim"); 2132 } else 2133 return; 2134 prom.memory = call_prom("open", 1, 1, ADDR("/memory")); 2135 prom_getprop(prom.chosen, "mmu", &prom.mmumap, 2136 sizeof(prom.mmumap)); 2137 prom.mmumap = be32_to_cpu(prom.mmumap); 2138 if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap)) 2139 of_workarounds &= ~OF_WA_CLAIM; /* hmmm */ 2140 } 2141 #else 2142 #define prom_find_mmu() 2143 #endif 2144 2145 static void __init prom_init_stdout(void) 2146 { 2147 char *path = of_stdout_device; 2148 char type[16]; 2149 phandle stdout_node; 2150 __be32 val; 2151 2152 if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0) 2153 prom_panic("cannot find stdout"); 2154 2155 prom.stdout = be32_to_cpu(val); 2156 2157 /* Get the full OF pathname of the stdout device */ 2158 memset(path, 0, 256); 2159 call_prom("instance-to-path", 3, 1, prom.stdout, path, 255); 2160 prom_printf("OF stdout device is: %s\n", of_stdout_device); 2161 prom_setprop(prom.chosen, "/chosen", "linux,stdout-path", 2162 path, prom_strlen(path) + 1); 2163 2164 /* instance-to-package fails on PA-Semi */ 2165 stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout); 2166 if (stdout_node != PROM_ERROR) { 2167 val = cpu_to_be32(stdout_node); 2168 2169 /* If it's a display, note it */ 2170 memset(type, 0, sizeof(type)); 2171 prom_getprop(stdout_node, "device_type", type, sizeof(type)); 2172 if (prom_strcmp(type, "display") == 0) 2173 prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0); 2174 } 2175 } 2176 2177 static int __init prom_find_machine_type(void) 2178 { 2179 char compat[256]; 2180 int len, i = 0; 2181 #ifdef CONFIG_PPC64 2182 phandle rtas; 2183 int x; 2184 #endif 2185 2186 /* Look for a PowerMac or a Cell */ 2187 len = prom_getprop(prom.root, "compatible", 2188 compat, sizeof(compat)-1); 2189 if (len > 0) { 2190 compat[len] = 0; 2191 while (i < len) { 2192 char *p = &compat[i]; 2193 int sl = prom_strlen(p); 2194 if (sl == 0) 2195 break; 2196 if (prom_strstr(p, "Power Macintosh") || 2197 prom_strstr(p, "MacRISC")) 2198 return PLATFORM_POWERMAC; 2199 #ifdef CONFIG_PPC64 2200 /* We must make sure we don't detect the IBM Cell 2201 * blades as pSeries due to some firmware issues, 2202 * so we do it here. 2203 */ 2204 if (prom_strstr(p, "IBM,CBEA") || 2205 prom_strstr(p, "IBM,CPBW-1.0")) 2206 return PLATFORM_GENERIC; 2207 #endif /* CONFIG_PPC64 */ 2208 i += sl + 1; 2209 } 2210 } 2211 #ifdef CONFIG_PPC64 2212 /* Try to figure out if it's an IBM pSeries or any other 2213 * PAPR compliant platform. We assume it is if : 2214 * - /device_type is "chrp" (please, do NOT use that for future 2215 * non-IBM designs ! 2216 * - it has /rtas 2217 */ 2218 len = prom_getprop(prom.root, "device_type", 2219 compat, sizeof(compat)-1); 2220 if (len <= 0) 2221 return PLATFORM_GENERIC; 2222 if (prom_strcmp(compat, "chrp")) 2223 return PLATFORM_GENERIC; 2224 2225 /* Default to pSeries. We need to know if we are running LPAR */ 2226 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas")); 2227 if (!PHANDLE_VALID(rtas)) 2228 return PLATFORM_GENERIC; 2229 x = prom_getproplen(rtas, "ibm,hypertas-functions"); 2230 if (x != PROM_ERROR) { 2231 prom_debug("Hypertas detected, assuming LPAR !\n"); 2232 return PLATFORM_PSERIES_LPAR; 2233 } 2234 return PLATFORM_PSERIES; 2235 #else 2236 return PLATFORM_GENERIC; 2237 #endif 2238 } 2239 2240 static int __init prom_set_color(ihandle ih, int i, int r, int g, int b) 2241 { 2242 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r); 2243 } 2244 2245 /* 2246 * If we have a display that we don't know how to drive, 2247 * we will want to try to execute OF's open method for it 2248 * later. However, OF will probably fall over if we do that 2249 * we've taken over the MMU. 2250 * So we check whether we will need to open the display, 2251 * and if so, open it now. 2252 */ 2253 static void __init prom_check_displays(void) 2254 { 2255 char type[16], *path; 2256 phandle node; 2257 ihandle ih; 2258 int i; 2259 2260 static const unsigned char default_colors[] __initconst = { 2261 0x00, 0x00, 0x00, 2262 0x00, 0x00, 0xaa, 2263 0x00, 0xaa, 0x00, 2264 0x00, 0xaa, 0xaa, 2265 0xaa, 0x00, 0x00, 2266 0xaa, 0x00, 0xaa, 2267 0xaa, 0xaa, 0x00, 2268 0xaa, 0xaa, 0xaa, 2269 0x55, 0x55, 0x55, 2270 0x55, 0x55, 0xff, 2271 0x55, 0xff, 0x55, 2272 0x55, 0xff, 0xff, 2273 0xff, 0x55, 0x55, 2274 0xff, 0x55, 0xff, 2275 0xff, 0xff, 0x55, 2276 0xff, 0xff, 0xff 2277 }; 2278 const unsigned char *clut; 2279 2280 prom_debug("Looking for displays\n"); 2281 for (node = 0; prom_next_node(&node); ) { 2282 memset(type, 0, sizeof(type)); 2283 prom_getprop(node, "device_type", type, sizeof(type)); 2284 if (prom_strcmp(type, "display") != 0) 2285 continue; 2286 2287 /* It seems OF doesn't null-terminate the path :-( */ 2288 path = prom_scratch; 2289 memset(path, 0, sizeof(prom_scratch)); 2290 2291 /* 2292 * leave some room at the end of the path for appending extra 2293 * arguments 2294 */ 2295 if (call_prom("package-to-path", 3, 1, node, path, 2296 sizeof(prom_scratch) - 10) == PROM_ERROR) 2297 continue; 2298 prom_printf("found display : %s, opening... ", path); 2299 2300 ih = call_prom("open", 1, 1, path); 2301 if (ih == 0) { 2302 prom_printf("failed\n"); 2303 continue; 2304 } 2305 2306 /* Success */ 2307 prom_printf("done\n"); 2308 prom_setprop(node, path, "linux,opened", NULL, 0); 2309 2310 /* Setup a usable color table when the appropriate 2311 * method is available. Should update this to set-colors */ 2312 clut = default_colors; 2313 for (i = 0; i < 16; i++, clut += 3) 2314 if (prom_set_color(ih, i, clut[0], clut[1], 2315 clut[2]) != 0) 2316 break; 2317 2318 #ifdef CONFIG_LOGO_LINUX_CLUT224 2319 clut = PTRRELOC(logo_linux_clut224.clut); 2320 for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3) 2321 if (prom_set_color(ih, i + 32, clut[0], clut[1], 2322 clut[2]) != 0) 2323 break; 2324 #endif /* CONFIG_LOGO_LINUX_CLUT224 */ 2325 2326 #ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX 2327 if (prom_getprop(node, "linux,boot-display", NULL, 0) != 2328 PROM_ERROR) { 2329 u32 width, height, pitch, addr; 2330 2331 prom_printf("Setting btext !\n"); 2332 prom_getprop(node, "width", &width, 4); 2333 prom_getprop(node, "height", &height, 4); 2334 prom_getprop(node, "linebytes", &pitch, 4); 2335 prom_getprop(node, "address", &addr, 4); 2336 prom_printf("W=%d H=%d LB=%d addr=0x%x\n", 2337 width, height, pitch, addr); 2338 btext_setup_display(width, height, 8, pitch, addr); 2339 btext_prepare_BAT(); 2340 } 2341 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */ 2342 } 2343 } 2344 2345 2346 /* Return (relocated) pointer to this much memory: moves initrd if reqd. */ 2347 static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end, 2348 unsigned long needed, unsigned long align) 2349 { 2350 void *ret; 2351 2352 *mem_start = _ALIGN(*mem_start, align); 2353 while ((*mem_start + needed) > *mem_end) { 2354 unsigned long room, chunk; 2355 2356 prom_debug("Chunk exhausted, claiming more at %lx...\n", 2357 alloc_bottom); 2358 room = alloc_top - alloc_bottom; 2359 if (room > DEVTREE_CHUNK_SIZE) 2360 room = DEVTREE_CHUNK_SIZE; 2361 if (room < PAGE_SIZE) 2362 prom_panic("No memory for flatten_device_tree " 2363 "(no room)\n"); 2364 chunk = alloc_up(room, 0); 2365 if (chunk == 0) 2366 prom_panic("No memory for flatten_device_tree " 2367 "(claim failed)\n"); 2368 *mem_end = chunk + room; 2369 } 2370 2371 ret = (void *)*mem_start; 2372 *mem_start += needed; 2373 2374 return ret; 2375 } 2376 2377 #define dt_push_token(token, mem_start, mem_end) do { \ 2378 void *room = make_room(mem_start, mem_end, 4, 4); \ 2379 *(__be32 *)room = cpu_to_be32(token); \ 2380 } while(0) 2381 2382 static unsigned long __init dt_find_string(char *str) 2383 { 2384 char *s, *os; 2385 2386 s = os = (char *)dt_string_start; 2387 s += 4; 2388 while (s < (char *)dt_string_end) { 2389 if (prom_strcmp(s, str) == 0) 2390 return s - os; 2391 s += prom_strlen(s) + 1; 2392 } 2393 return 0; 2394 } 2395 2396 /* 2397 * The Open Firmware 1275 specification states properties must be 31 bytes or 2398 * less, however not all firmwares obey this. Make it 64 bytes to be safe. 2399 */ 2400 #define MAX_PROPERTY_NAME 64 2401 2402 static void __init scan_dt_build_strings(phandle node, 2403 unsigned long *mem_start, 2404 unsigned long *mem_end) 2405 { 2406 char *prev_name, *namep, *sstart; 2407 unsigned long soff; 2408 phandle child; 2409 2410 sstart = (char *)dt_string_start; 2411 2412 /* get and store all property names */ 2413 prev_name = ""; 2414 for (;;) { 2415 /* 64 is max len of name including nul. */ 2416 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1); 2417 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) { 2418 /* No more nodes: unwind alloc */ 2419 *mem_start = (unsigned long)namep; 2420 break; 2421 } 2422 2423 /* skip "name" */ 2424 if (prom_strcmp(namep, "name") == 0) { 2425 *mem_start = (unsigned long)namep; 2426 prev_name = "name"; 2427 continue; 2428 } 2429 /* get/create string entry */ 2430 soff = dt_find_string(namep); 2431 if (soff != 0) { 2432 *mem_start = (unsigned long)namep; 2433 namep = sstart + soff; 2434 } else { 2435 /* Trim off some if we can */ 2436 *mem_start = (unsigned long)namep + prom_strlen(namep) + 1; 2437 dt_string_end = *mem_start; 2438 } 2439 prev_name = namep; 2440 } 2441 2442 /* do all our children */ 2443 child = call_prom("child", 1, 1, node); 2444 while (child != 0) { 2445 scan_dt_build_strings(child, mem_start, mem_end); 2446 child = call_prom("peer", 1, 1, child); 2447 } 2448 } 2449 2450 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start, 2451 unsigned long *mem_end) 2452 { 2453 phandle child; 2454 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path; 2455 unsigned long soff; 2456 unsigned char *valp; 2457 static char pname[MAX_PROPERTY_NAME] __prombss; 2458 int l, room, has_phandle = 0; 2459 2460 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end); 2461 2462 /* get the node's full name */ 2463 namep = (char *)*mem_start; 2464 room = *mem_end - *mem_start; 2465 if (room > 255) 2466 room = 255; 2467 l = call_prom("package-to-path", 3, 1, node, namep, room); 2468 if (l >= 0) { 2469 /* Didn't fit? Get more room. */ 2470 if (l >= room) { 2471 if (l >= *mem_end - *mem_start) 2472 namep = make_room(mem_start, mem_end, l+1, 1); 2473 call_prom("package-to-path", 3, 1, node, namep, l); 2474 } 2475 namep[l] = '\0'; 2476 2477 /* Fixup an Apple bug where they have bogus \0 chars in the 2478 * middle of the path in some properties, and extract 2479 * the unit name (everything after the last '/'). 2480 */ 2481 for (lp = p = namep, ep = namep + l; p < ep; p++) { 2482 if (*p == '/') 2483 lp = namep; 2484 else if (*p != 0) 2485 *lp++ = *p; 2486 } 2487 *lp = 0; 2488 *mem_start = _ALIGN((unsigned long)lp + 1, 4); 2489 } 2490 2491 /* get it again for debugging */ 2492 path = prom_scratch; 2493 memset(path, 0, sizeof(prom_scratch)); 2494 call_prom("package-to-path", 3, 1, node, path, sizeof(prom_scratch) - 1); 2495 2496 /* get and store all properties */ 2497 prev_name = ""; 2498 sstart = (char *)dt_string_start; 2499 for (;;) { 2500 if (call_prom("nextprop", 3, 1, node, prev_name, 2501 pname) != 1) 2502 break; 2503 2504 /* skip "name" */ 2505 if (prom_strcmp(pname, "name") == 0) { 2506 prev_name = "name"; 2507 continue; 2508 } 2509 2510 /* find string offset */ 2511 soff = dt_find_string(pname); 2512 if (soff == 0) { 2513 prom_printf("WARNING: Can't find string index for" 2514 " <%s>, node %s\n", pname, path); 2515 break; 2516 } 2517 prev_name = sstart + soff; 2518 2519 /* get length */ 2520 l = call_prom("getproplen", 2, 1, node, pname); 2521 2522 /* sanity checks */ 2523 if (l == PROM_ERROR) 2524 continue; 2525 2526 /* push property head */ 2527 dt_push_token(OF_DT_PROP, mem_start, mem_end); 2528 dt_push_token(l, mem_start, mem_end); 2529 dt_push_token(soff, mem_start, mem_end); 2530 2531 /* push property content */ 2532 valp = make_room(mem_start, mem_end, l, 4); 2533 call_prom("getprop", 4, 1, node, pname, valp, l); 2534 *mem_start = _ALIGN(*mem_start, 4); 2535 2536 if (!prom_strcmp(pname, "phandle")) 2537 has_phandle = 1; 2538 } 2539 2540 /* Add a "phandle" property if none already exist */ 2541 if (!has_phandle) { 2542 soff = dt_find_string("phandle"); 2543 if (soff == 0) 2544 prom_printf("WARNING: Can't find string index for <phandle> node %s\n", path); 2545 else { 2546 dt_push_token(OF_DT_PROP, mem_start, mem_end); 2547 dt_push_token(4, mem_start, mem_end); 2548 dt_push_token(soff, mem_start, mem_end); 2549 valp = make_room(mem_start, mem_end, 4, 4); 2550 *(__be32 *)valp = cpu_to_be32(node); 2551 } 2552 } 2553 2554 /* do all our children */ 2555 child = call_prom("child", 1, 1, node); 2556 while (child != 0) { 2557 scan_dt_build_struct(child, mem_start, mem_end); 2558 child = call_prom("peer", 1, 1, child); 2559 } 2560 2561 dt_push_token(OF_DT_END_NODE, mem_start, mem_end); 2562 } 2563 2564 static void __init flatten_device_tree(void) 2565 { 2566 phandle root; 2567 unsigned long mem_start, mem_end, room; 2568 struct boot_param_header *hdr; 2569 char *namep; 2570 u64 *rsvmap; 2571 2572 /* 2573 * Check how much room we have between alloc top & bottom (+/- a 2574 * few pages), crop to 1MB, as this is our "chunk" size 2575 */ 2576 room = alloc_top - alloc_bottom - 0x4000; 2577 if (room > DEVTREE_CHUNK_SIZE) 2578 room = DEVTREE_CHUNK_SIZE; 2579 prom_debug("starting device tree allocs at %lx\n", alloc_bottom); 2580 2581 /* Now try to claim that */ 2582 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE); 2583 if (mem_start == 0) 2584 prom_panic("Can't allocate initial device-tree chunk\n"); 2585 mem_end = mem_start + room; 2586 2587 /* Get root of tree */ 2588 root = call_prom("peer", 1, 1, (phandle)0); 2589 if (root == (phandle)0) 2590 prom_panic ("couldn't get device tree root\n"); 2591 2592 /* Build header and make room for mem rsv map */ 2593 mem_start = _ALIGN(mem_start, 4); 2594 hdr = make_room(&mem_start, &mem_end, 2595 sizeof(struct boot_param_header), 4); 2596 dt_header_start = (unsigned long)hdr; 2597 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8); 2598 2599 /* Start of strings */ 2600 mem_start = PAGE_ALIGN(mem_start); 2601 dt_string_start = mem_start; 2602 mem_start += 4; /* hole */ 2603 2604 /* Add "phandle" in there, we'll need it */ 2605 namep = make_room(&mem_start, &mem_end, 16, 1); 2606 prom_strcpy(namep, "phandle"); 2607 mem_start = (unsigned long)namep + prom_strlen(namep) + 1; 2608 2609 /* Build string array */ 2610 prom_printf("Building dt strings...\n"); 2611 scan_dt_build_strings(root, &mem_start, &mem_end); 2612 dt_string_end = mem_start; 2613 2614 /* Build structure */ 2615 mem_start = PAGE_ALIGN(mem_start); 2616 dt_struct_start = mem_start; 2617 prom_printf("Building dt structure...\n"); 2618 scan_dt_build_struct(root, &mem_start, &mem_end); 2619 dt_push_token(OF_DT_END, &mem_start, &mem_end); 2620 dt_struct_end = PAGE_ALIGN(mem_start); 2621 2622 /* Finish header */ 2623 hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu); 2624 hdr->magic = cpu_to_be32(OF_DT_HEADER); 2625 hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start); 2626 hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start); 2627 hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start); 2628 hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start); 2629 hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start); 2630 hdr->version = cpu_to_be32(OF_DT_VERSION); 2631 /* Version 16 is not backward compatible */ 2632 hdr->last_comp_version = cpu_to_be32(0x10); 2633 2634 /* Copy the reserve map in */ 2635 memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map)); 2636 2637 #ifdef DEBUG_PROM 2638 { 2639 int i; 2640 prom_printf("reserved memory map:\n"); 2641 for (i = 0; i < mem_reserve_cnt; i++) 2642 prom_printf(" %llx - %llx\n", 2643 be64_to_cpu(mem_reserve_map[i].base), 2644 be64_to_cpu(mem_reserve_map[i].size)); 2645 } 2646 #endif 2647 /* Bump mem_reserve_cnt to cause further reservations to fail 2648 * since it's too late. 2649 */ 2650 mem_reserve_cnt = MEM_RESERVE_MAP_SIZE; 2651 2652 prom_printf("Device tree strings 0x%lx -> 0x%lx\n", 2653 dt_string_start, dt_string_end); 2654 prom_printf("Device tree struct 0x%lx -> 0x%lx\n", 2655 dt_struct_start, dt_struct_end); 2656 } 2657 2658 #ifdef CONFIG_PPC_MAPLE 2659 /* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property. 2660 * The values are bad, and it doesn't even have the right number of cells. */ 2661 static void __init fixup_device_tree_maple(void) 2662 { 2663 phandle isa; 2664 u32 rloc = 0x01002000; /* IO space; PCI device = 4 */ 2665 u32 isa_ranges[6]; 2666 char *name; 2667 2668 name = "/ht@0/isa@4"; 2669 isa = call_prom("finddevice", 1, 1, ADDR(name)); 2670 if (!PHANDLE_VALID(isa)) { 2671 name = "/ht@0/isa@6"; 2672 isa = call_prom("finddevice", 1, 1, ADDR(name)); 2673 rloc = 0x01003000; /* IO space; PCI device = 6 */ 2674 } 2675 if (!PHANDLE_VALID(isa)) 2676 return; 2677 2678 if (prom_getproplen(isa, "ranges") != 12) 2679 return; 2680 if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges)) 2681 == PROM_ERROR) 2682 return; 2683 2684 if (isa_ranges[0] != 0x1 || 2685 isa_ranges[1] != 0xf4000000 || 2686 isa_ranges[2] != 0x00010000) 2687 return; 2688 2689 prom_printf("Fixing up bogus ISA range on Maple/Apache...\n"); 2690 2691 isa_ranges[0] = 0x1; 2692 isa_ranges[1] = 0x0; 2693 isa_ranges[2] = rloc; 2694 isa_ranges[3] = 0x0; 2695 isa_ranges[4] = 0x0; 2696 isa_ranges[5] = 0x00010000; 2697 prom_setprop(isa, name, "ranges", 2698 isa_ranges, sizeof(isa_ranges)); 2699 } 2700 2701 #define CPC925_MC_START 0xf8000000 2702 #define CPC925_MC_LENGTH 0x1000000 2703 /* The values for memory-controller don't have right number of cells */ 2704 static void __init fixup_device_tree_maple_memory_controller(void) 2705 { 2706 phandle mc; 2707 u32 mc_reg[4]; 2708 char *name = "/hostbridge@f8000000"; 2709 u32 ac, sc; 2710 2711 mc = call_prom("finddevice", 1, 1, ADDR(name)); 2712 if (!PHANDLE_VALID(mc)) 2713 return; 2714 2715 if (prom_getproplen(mc, "reg") != 8) 2716 return; 2717 2718 prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac)); 2719 prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc)); 2720 if ((ac != 2) || (sc != 2)) 2721 return; 2722 2723 if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR) 2724 return; 2725 2726 if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH) 2727 return; 2728 2729 prom_printf("Fixing up bogus hostbridge on Maple...\n"); 2730 2731 mc_reg[0] = 0x0; 2732 mc_reg[1] = CPC925_MC_START; 2733 mc_reg[2] = 0x0; 2734 mc_reg[3] = CPC925_MC_LENGTH; 2735 prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg)); 2736 } 2737 #else 2738 #define fixup_device_tree_maple() 2739 #define fixup_device_tree_maple_memory_controller() 2740 #endif 2741 2742 #ifdef CONFIG_PPC_CHRP 2743 /* 2744 * Pegasos and BriQ lacks the "ranges" property in the isa node 2745 * Pegasos needs decimal IRQ 14/15, not hexadecimal 2746 * Pegasos has the IDE configured in legacy mode, but advertised as native 2747 */ 2748 static void __init fixup_device_tree_chrp(void) 2749 { 2750 phandle ph; 2751 u32 prop[6]; 2752 u32 rloc = 0x01006000; /* IO space; PCI device = 12 */ 2753 char *name; 2754 int rc; 2755 2756 name = "/pci@80000000/isa@c"; 2757 ph = call_prom("finddevice", 1, 1, ADDR(name)); 2758 if (!PHANDLE_VALID(ph)) { 2759 name = "/pci@ff500000/isa@6"; 2760 ph = call_prom("finddevice", 1, 1, ADDR(name)); 2761 rloc = 0x01003000; /* IO space; PCI device = 6 */ 2762 } 2763 if (PHANDLE_VALID(ph)) { 2764 rc = prom_getproplen(ph, "ranges"); 2765 if (rc == 0 || rc == PROM_ERROR) { 2766 prom_printf("Fixing up missing ISA range on Pegasos...\n"); 2767 2768 prop[0] = 0x1; 2769 prop[1] = 0x0; 2770 prop[2] = rloc; 2771 prop[3] = 0x0; 2772 prop[4] = 0x0; 2773 prop[5] = 0x00010000; 2774 prom_setprop(ph, name, "ranges", prop, sizeof(prop)); 2775 } 2776 } 2777 2778 name = "/pci@80000000/ide@C,1"; 2779 ph = call_prom("finddevice", 1, 1, ADDR(name)); 2780 if (PHANDLE_VALID(ph)) { 2781 prom_printf("Fixing up IDE interrupt on Pegasos...\n"); 2782 prop[0] = 14; 2783 prop[1] = 0x0; 2784 prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32)); 2785 prom_printf("Fixing up IDE class-code on Pegasos...\n"); 2786 rc = prom_getprop(ph, "class-code", prop, sizeof(u32)); 2787 if (rc == sizeof(u32)) { 2788 prop[0] &= ~0x5; 2789 prom_setprop(ph, name, "class-code", prop, sizeof(u32)); 2790 } 2791 } 2792 } 2793 #else 2794 #define fixup_device_tree_chrp() 2795 #endif 2796 2797 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC) 2798 static void __init fixup_device_tree_pmac(void) 2799 { 2800 phandle u3, i2c, mpic; 2801 u32 u3_rev; 2802 u32 interrupts[2]; 2803 u32 parent; 2804 2805 /* Some G5s have a missing interrupt definition, fix it up here */ 2806 u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000")); 2807 if (!PHANDLE_VALID(u3)) 2808 return; 2809 i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000")); 2810 if (!PHANDLE_VALID(i2c)) 2811 return; 2812 mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000")); 2813 if (!PHANDLE_VALID(mpic)) 2814 return; 2815 2816 /* check if proper rev of u3 */ 2817 if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev)) 2818 == PROM_ERROR) 2819 return; 2820 if (u3_rev < 0x35 || u3_rev > 0x39) 2821 return; 2822 /* does it need fixup ? */ 2823 if (prom_getproplen(i2c, "interrupts") > 0) 2824 return; 2825 2826 prom_printf("fixing up bogus interrupts for u3 i2c...\n"); 2827 2828 /* interrupt on this revision of u3 is number 0 and level */ 2829 interrupts[0] = 0; 2830 interrupts[1] = 1; 2831 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts", 2832 &interrupts, sizeof(interrupts)); 2833 parent = (u32)mpic; 2834 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent", 2835 &parent, sizeof(parent)); 2836 } 2837 #else 2838 #define fixup_device_tree_pmac() 2839 #endif 2840 2841 #ifdef CONFIG_PPC_EFIKA 2842 /* 2843 * The MPC5200 FEC driver requires an phy-handle property to tell it how 2844 * to talk to the phy. If the phy-handle property is missing, then this 2845 * function is called to add the appropriate nodes and link it to the 2846 * ethernet node. 2847 */ 2848 static void __init fixup_device_tree_efika_add_phy(void) 2849 { 2850 u32 node; 2851 char prop[64]; 2852 int rv; 2853 2854 /* Check if /builtin/ethernet exists - bail if it doesn't */ 2855 node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet")); 2856 if (!PHANDLE_VALID(node)) 2857 return; 2858 2859 /* Check if the phy-handle property exists - bail if it does */ 2860 rv = prom_getprop(node, "phy-handle", prop, sizeof(prop)); 2861 if (!rv) 2862 return; 2863 2864 /* 2865 * At this point the ethernet device doesn't have a phy described. 2866 * Now we need to add the missing phy node and linkage 2867 */ 2868 2869 /* Check for an MDIO bus node - if missing then create one */ 2870 node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio")); 2871 if (!PHANDLE_VALID(node)) { 2872 prom_printf("Adding Ethernet MDIO node\n"); 2873 call_prom("interpret", 1, 1, 2874 " s\" /builtin\" find-device" 2875 " new-device" 2876 " 1 encode-int s\" #address-cells\" property" 2877 " 0 encode-int s\" #size-cells\" property" 2878 " s\" mdio\" device-name" 2879 " s\" fsl,mpc5200b-mdio\" encode-string" 2880 " s\" compatible\" property" 2881 " 0xf0003000 0x400 reg" 2882 " 0x2 encode-int" 2883 " 0x5 encode-int encode+" 2884 " 0x3 encode-int encode+" 2885 " s\" interrupts\" property" 2886 " finish-device"); 2887 }; 2888 2889 /* Check for a PHY device node - if missing then create one and 2890 * give it's phandle to the ethernet node */ 2891 node = call_prom("finddevice", 1, 1, 2892 ADDR("/builtin/mdio/ethernet-phy")); 2893 if (!PHANDLE_VALID(node)) { 2894 prom_printf("Adding Ethernet PHY node\n"); 2895 call_prom("interpret", 1, 1, 2896 " s\" /builtin/mdio\" find-device" 2897 " new-device" 2898 " s\" ethernet-phy\" device-name" 2899 " 0x10 encode-int s\" reg\" property" 2900 " my-self" 2901 " ihandle>phandle" 2902 " finish-device" 2903 " s\" /builtin/ethernet\" find-device" 2904 " encode-int" 2905 " s\" phy-handle\" property" 2906 " device-end"); 2907 } 2908 } 2909 2910 static void __init fixup_device_tree_efika(void) 2911 { 2912 int sound_irq[3] = { 2, 2, 0 }; 2913 int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0, 2914 3,4,0, 3,5,0, 3,6,0, 3,7,0, 2915 3,8,0, 3,9,0, 3,10,0, 3,11,0, 2916 3,12,0, 3,13,0, 3,14,0, 3,15,0 }; 2917 u32 node; 2918 char prop[64]; 2919 int rv, len; 2920 2921 /* Check if we're really running on a EFIKA */ 2922 node = call_prom("finddevice", 1, 1, ADDR("/")); 2923 if (!PHANDLE_VALID(node)) 2924 return; 2925 2926 rv = prom_getprop(node, "model", prop, sizeof(prop)); 2927 if (rv == PROM_ERROR) 2928 return; 2929 if (prom_strcmp(prop, "EFIKA5K2")) 2930 return; 2931 2932 prom_printf("Applying EFIKA device tree fixups\n"); 2933 2934 /* Claiming to be 'chrp' is death */ 2935 node = call_prom("finddevice", 1, 1, ADDR("/")); 2936 rv = prom_getprop(node, "device_type", prop, sizeof(prop)); 2937 if (rv != PROM_ERROR && (prom_strcmp(prop, "chrp") == 0)) 2938 prom_setprop(node, "/", "device_type", "efika", sizeof("efika")); 2939 2940 /* CODEGEN,description is exposed in /proc/cpuinfo so 2941 fix that too */ 2942 rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop)); 2943 if (rv != PROM_ERROR && (prom_strstr(prop, "CHRP"))) 2944 prom_setprop(node, "/", "CODEGEN,description", 2945 "Efika 5200B PowerPC System", 2946 sizeof("Efika 5200B PowerPC System")); 2947 2948 /* Fixup bestcomm interrupts property */ 2949 node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm")); 2950 if (PHANDLE_VALID(node)) { 2951 len = prom_getproplen(node, "interrupts"); 2952 if (len == 12) { 2953 prom_printf("Fixing bestcomm interrupts property\n"); 2954 prom_setprop(node, "/builtin/bestcom", "interrupts", 2955 bcomm_irq, sizeof(bcomm_irq)); 2956 } 2957 } 2958 2959 /* Fixup sound interrupts property */ 2960 node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound")); 2961 if (PHANDLE_VALID(node)) { 2962 rv = prom_getprop(node, "interrupts", prop, sizeof(prop)); 2963 if (rv == PROM_ERROR) { 2964 prom_printf("Adding sound interrupts property\n"); 2965 prom_setprop(node, "/builtin/sound", "interrupts", 2966 sound_irq, sizeof(sound_irq)); 2967 } 2968 } 2969 2970 /* Make sure ethernet phy-handle property exists */ 2971 fixup_device_tree_efika_add_phy(); 2972 } 2973 #else 2974 #define fixup_device_tree_efika() 2975 #endif 2976 2977 #ifdef CONFIG_PPC_PASEMI_NEMO 2978 /* 2979 * CFE supplied on Nemo is broken in several ways, biggest 2980 * problem is that it reassigns ISA interrupts to unused mpic ints. 2981 * Add an interrupt-controller property for the io-bridge to use 2982 * and correct the ints so we can attach them to an irq_domain 2983 */ 2984 static void __init fixup_device_tree_pasemi(void) 2985 { 2986 u32 interrupts[2], parent, rval, val = 0; 2987 char *name, *pci_name; 2988 phandle iob, node; 2989 2990 /* Find the root pci node */ 2991 name = "/pxp@0,e0000000"; 2992 iob = call_prom("finddevice", 1, 1, ADDR(name)); 2993 if (!PHANDLE_VALID(iob)) 2994 return; 2995 2996 /* check if interrupt-controller node set yet */ 2997 if (prom_getproplen(iob, "interrupt-controller") !=PROM_ERROR) 2998 return; 2999 3000 prom_printf("adding interrupt-controller property for SB600...\n"); 3001 3002 prom_setprop(iob, name, "interrupt-controller", &val, 0); 3003 3004 pci_name = "/pxp@0,e0000000/pci@11"; 3005 node = call_prom("finddevice", 1, 1, ADDR(pci_name)); 3006 parent = ADDR(iob); 3007 3008 for( ; prom_next_node(&node); ) { 3009 /* scan each node for one with an interrupt */ 3010 if (!PHANDLE_VALID(node)) 3011 continue; 3012 3013 rval = prom_getproplen(node, "interrupts"); 3014 if (rval == 0 || rval == PROM_ERROR) 3015 continue; 3016 3017 prom_getprop(node, "interrupts", &interrupts, sizeof(interrupts)); 3018 if ((interrupts[0] < 212) || (interrupts[0] > 222)) 3019 continue; 3020 3021 /* found a node, update both interrupts and interrupt-parent */ 3022 if ((interrupts[0] >= 212) && (interrupts[0] <= 215)) 3023 interrupts[0] -= 203; 3024 if ((interrupts[0] >= 216) && (interrupts[0] <= 220)) 3025 interrupts[0] -= 213; 3026 if (interrupts[0] == 221) 3027 interrupts[0] = 14; 3028 if (interrupts[0] == 222) 3029 interrupts[0] = 8; 3030 3031 prom_setprop(node, pci_name, "interrupts", interrupts, 3032 sizeof(interrupts)); 3033 prom_setprop(node, pci_name, "interrupt-parent", &parent, 3034 sizeof(parent)); 3035 } 3036 3037 /* 3038 * The io-bridge has device_type set to 'io-bridge' change it to 'isa' 3039 * so that generic isa-bridge code can add the SB600 and its on-board 3040 * peripherals. 3041 */ 3042 name = "/pxp@0,e0000000/io-bridge@0"; 3043 iob = call_prom("finddevice", 1, 1, ADDR(name)); 3044 if (!PHANDLE_VALID(iob)) 3045 return; 3046 3047 /* device_type is already set, just change it. */ 3048 3049 prom_printf("Changing device_type of SB600 node...\n"); 3050 3051 prom_setprop(iob, name, "device_type", "isa", sizeof("isa")); 3052 } 3053 #else /* !CONFIG_PPC_PASEMI_NEMO */ 3054 static inline void fixup_device_tree_pasemi(void) { } 3055 #endif 3056 3057 static void __init fixup_device_tree(void) 3058 { 3059 fixup_device_tree_maple(); 3060 fixup_device_tree_maple_memory_controller(); 3061 fixup_device_tree_chrp(); 3062 fixup_device_tree_pmac(); 3063 fixup_device_tree_efika(); 3064 fixup_device_tree_pasemi(); 3065 } 3066 3067 static void __init prom_find_boot_cpu(void) 3068 { 3069 __be32 rval; 3070 ihandle prom_cpu; 3071 phandle cpu_pkg; 3072 3073 rval = 0; 3074 if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0) 3075 return; 3076 prom_cpu = be32_to_cpu(rval); 3077 3078 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu); 3079 3080 if (!PHANDLE_VALID(cpu_pkg)) 3081 return; 3082 3083 prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval)); 3084 prom.cpu = be32_to_cpu(rval); 3085 3086 prom_debug("Booting CPU hw index = %d\n", prom.cpu); 3087 } 3088 3089 static void __init prom_check_initrd(unsigned long r3, unsigned long r4) 3090 { 3091 #ifdef CONFIG_BLK_DEV_INITRD 3092 if (r3 && r4 && r4 != 0xdeadbeef) { 3093 __be64 val; 3094 3095 prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3; 3096 prom_initrd_end = prom_initrd_start + r4; 3097 3098 val = cpu_to_be64(prom_initrd_start); 3099 prom_setprop(prom.chosen, "/chosen", "linux,initrd-start", 3100 &val, sizeof(val)); 3101 val = cpu_to_be64(prom_initrd_end); 3102 prom_setprop(prom.chosen, "/chosen", "linux,initrd-end", 3103 &val, sizeof(val)); 3104 3105 reserve_mem(prom_initrd_start, 3106 prom_initrd_end - prom_initrd_start); 3107 3108 prom_debug("initrd_start=0x%lx\n", prom_initrd_start); 3109 prom_debug("initrd_end=0x%lx\n", prom_initrd_end); 3110 } 3111 #endif /* CONFIG_BLK_DEV_INITRD */ 3112 } 3113 3114 #ifdef CONFIG_PPC64 3115 #ifdef CONFIG_RELOCATABLE 3116 static void reloc_toc(void) 3117 { 3118 } 3119 3120 static void unreloc_toc(void) 3121 { 3122 } 3123 #else 3124 static void __reloc_toc(unsigned long offset, unsigned long nr_entries) 3125 { 3126 unsigned long i; 3127 unsigned long *toc_entry; 3128 3129 /* Get the start of the TOC by using r2 directly. */ 3130 asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry)); 3131 3132 for (i = 0; i < nr_entries; i++) { 3133 *toc_entry = *toc_entry + offset; 3134 toc_entry++; 3135 } 3136 } 3137 3138 static void reloc_toc(void) 3139 { 3140 unsigned long offset = reloc_offset(); 3141 unsigned long nr_entries = 3142 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long); 3143 3144 __reloc_toc(offset, nr_entries); 3145 3146 mb(); 3147 } 3148 3149 static void unreloc_toc(void) 3150 { 3151 unsigned long offset = reloc_offset(); 3152 unsigned long nr_entries = 3153 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long); 3154 3155 mb(); 3156 3157 __reloc_toc(-offset, nr_entries); 3158 } 3159 #endif 3160 #endif 3161 3162 /* 3163 * We enter here early on, when the Open Firmware prom is still 3164 * handling exceptions and the MMU hash table for us. 3165 */ 3166 3167 unsigned long __init prom_init(unsigned long r3, unsigned long r4, 3168 unsigned long pp, 3169 unsigned long r6, unsigned long r7, 3170 unsigned long kbase) 3171 { 3172 unsigned long hdr; 3173 3174 #ifdef CONFIG_PPC32 3175 unsigned long offset = reloc_offset(); 3176 reloc_got2(offset); 3177 #else 3178 reloc_toc(); 3179 #endif 3180 3181 /* 3182 * First zero the BSS 3183 */ 3184 memset(&__bss_start, 0, __bss_stop - __bss_start); 3185 3186 /* 3187 * Init interface to Open Firmware, get some node references, 3188 * like /chosen 3189 */ 3190 prom_init_client_services(pp); 3191 3192 /* 3193 * See if this OF is old enough that we need to do explicit maps 3194 * and other workarounds 3195 */ 3196 prom_find_mmu(); 3197 3198 /* 3199 * Init prom stdout device 3200 */ 3201 prom_init_stdout(); 3202 3203 prom_printf("Preparing to boot %s", linux_banner); 3204 3205 /* 3206 * Get default machine type. At this point, we do not differentiate 3207 * between pSeries SMP and pSeries LPAR 3208 */ 3209 of_platform = prom_find_machine_type(); 3210 prom_printf("Detected machine type: %x\n", of_platform); 3211 3212 #ifndef CONFIG_NONSTATIC_KERNEL 3213 /* Bail if this is a kdump kernel. */ 3214 if (PHYSICAL_START > 0) 3215 prom_panic("Error: You can't boot a kdump kernel from OF!\n"); 3216 #endif 3217 3218 /* 3219 * Check for an initrd 3220 */ 3221 prom_check_initrd(r3, r4); 3222 3223 /* 3224 * Do early parsing of command line 3225 */ 3226 early_cmdline_parse(); 3227 3228 #ifdef CONFIG_PPC_PSERIES 3229 /* 3230 * On pSeries, inform the firmware about our capabilities 3231 */ 3232 if (of_platform == PLATFORM_PSERIES || 3233 of_platform == PLATFORM_PSERIES_LPAR) 3234 prom_send_capabilities(); 3235 #endif 3236 3237 /* 3238 * Copy the CPU hold code 3239 */ 3240 if (of_platform != PLATFORM_POWERMAC) 3241 copy_and_flush(0, kbase, 0x100, 0); 3242 3243 /* 3244 * Initialize memory management within prom_init 3245 */ 3246 prom_init_mem(); 3247 3248 /* 3249 * Determine which cpu is actually running right _now_ 3250 */ 3251 prom_find_boot_cpu(); 3252 3253 /* 3254 * Initialize display devices 3255 */ 3256 prom_check_displays(); 3257 3258 #if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__) 3259 /* 3260 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else 3261 * that uses the allocator, we need to make sure we get the top of memory 3262 * available for us here... 3263 */ 3264 if (of_platform == PLATFORM_PSERIES) 3265 prom_initialize_tce_table(); 3266 #endif 3267 3268 /* 3269 * On non-powermacs, try to instantiate RTAS. PowerMacs don't 3270 * have a usable RTAS implementation. 3271 */ 3272 if (of_platform != PLATFORM_POWERMAC) 3273 prom_instantiate_rtas(); 3274 3275 #ifdef CONFIG_PPC64 3276 /* instantiate sml */ 3277 prom_instantiate_sml(); 3278 #endif 3279 3280 /* 3281 * On non-powermacs, put all CPUs in spin-loops. 3282 * 3283 * PowerMacs use a different mechanism to spin CPUs 3284 * 3285 * (This must be done after instanciating RTAS) 3286 */ 3287 if (of_platform != PLATFORM_POWERMAC) 3288 prom_hold_cpus(); 3289 3290 /* 3291 * Fill in some infos for use by the kernel later on 3292 */ 3293 if (prom_memory_limit) { 3294 __be64 val = cpu_to_be64(prom_memory_limit); 3295 prom_setprop(prom.chosen, "/chosen", "linux,memory-limit", 3296 &val, sizeof(val)); 3297 } 3298 #ifdef CONFIG_PPC64 3299 if (prom_iommu_off) 3300 prom_setprop(prom.chosen, "/chosen", "linux,iommu-off", 3301 NULL, 0); 3302 3303 if (prom_iommu_force_on) 3304 prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on", 3305 NULL, 0); 3306 3307 if (prom_tce_alloc_start) { 3308 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start", 3309 &prom_tce_alloc_start, 3310 sizeof(prom_tce_alloc_start)); 3311 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end", 3312 &prom_tce_alloc_end, 3313 sizeof(prom_tce_alloc_end)); 3314 } 3315 #endif 3316 3317 /* 3318 * Fixup any known bugs in the device-tree 3319 */ 3320 fixup_device_tree(); 3321 3322 /* 3323 * Now finally create the flattened device-tree 3324 */ 3325 prom_printf("copying OF device tree...\n"); 3326 flatten_device_tree(); 3327 3328 /* 3329 * in case stdin is USB and still active on IBM machines... 3330 * Unfortunately quiesce crashes on some powermacs if we have 3331 * closed stdin already (in particular the powerbook 101). 3332 */ 3333 if (of_platform != PLATFORM_POWERMAC) 3334 prom_close_stdin(); 3335 3336 /* 3337 * Call OF "quiesce" method to shut down pending DMA's from 3338 * devices etc... 3339 */ 3340 prom_printf("Quiescing Open Firmware ...\n"); 3341 call_prom("quiesce", 0, 0); 3342 3343 /* 3344 * And finally, call the kernel passing it the flattened device 3345 * tree and NULL as r5, thus triggering the new entry point which 3346 * is common to us and kexec 3347 */ 3348 hdr = dt_header_start; 3349 3350 /* Don't print anything after quiesce under OPAL, it crashes OFW */ 3351 prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase); 3352 prom_debug("->dt_header_start=0x%lx\n", hdr); 3353 3354 #ifdef CONFIG_PPC32 3355 reloc_got2(-offset); 3356 #else 3357 unreloc_toc(); 3358 #endif 3359 3360 __start(hdr, kbase, 0, 0, 0, 0, 0); 3361 3362 return 0; 3363 } 3364