1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Procedures for interfacing to Open Firmware. 4 * 5 * Paul Mackerras August 1996. 6 * Copyright (C) 1996-2005 Paul Mackerras. 7 * 8 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner. 9 * {engebret|bergner}@us.ibm.com 10 */ 11 12 #undef DEBUG_PROM 13 14 /* we cannot use FORTIFY as it brings in new symbols */ 15 #define __NO_FORTIFY 16 17 #include <stdarg.h> 18 #include <linux/kernel.h> 19 #include <linux/string.h> 20 #include <linux/init.h> 21 #include <linux/threads.h> 22 #include <linux/spinlock.h> 23 #include <linux/types.h> 24 #include <linux/pci.h> 25 #include <linux/proc_fs.h> 26 #include <linux/delay.h> 27 #include <linux/initrd.h> 28 #include <linux/bitops.h> 29 #include <asm/prom.h> 30 #include <asm/rtas.h> 31 #include <asm/page.h> 32 #include <asm/processor.h> 33 #include <asm/irq.h> 34 #include <asm/io.h> 35 #include <asm/smp.h> 36 #include <asm/mmu.h> 37 #include <asm/pgtable.h> 38 #include <asm/iommu.h> 39 #include <asm/btext.h> 40 #include <asm/sections.h> 41 #include <asm/machdep.h> 42 #include <asm/asm-prototypes.h> 43 #include <asm/ultravisor-api.h> 44 45 #include <linux/linux_logo.h> 46 47 /* All of prom_init bss lives here */ 48 #define __prombss __section(.bss.prominit) 49 50 /* 51 * Eventually bump that one up 52 */ 53 #define DEVTREE_CHUNK_SIZE 0x100000 54 55 /* 56 * This is the size of the local memory reserve map that gets copied 57 * into the boot params passed to the kernel. That size is totally 58 * flexible as the kernel just reads the list until it encounters an 59 * entry with size 0, so it can be changed without breaking binary 60 * compatibility 61 */ 62 #define MEM_RESERVE_MAP_SIZE 8 63 64 /* 65 * prom_init() is called very early on, before the kernel text 66 * and data have been mapped to KERNELBASE. At this point the code 67 * is running at whatever address it has been loaded at. 68 * On ppc32 we compile with -mrelocatable, which means that references 69 * to extern and static variables get relocated automatically. 70 * ppc64 objects are always relocatable, we just need to relocate the 71 * TOC. 72 * 73 * Because OF may have mapped I/O devices into the area starting at 74 * KERNELBASE, particularly on CHRP machines, we can't safely call 75 * OF once the kernel has been mapped to KERNELBASE. Therefore all 76 * OF calls must be done within prom_init(). 77 * 78 * ADDR is used in calls to call_prom. The 4th and following 79 * arguments to call_prom should be 32-bit values. 80 * On ppc64, 64 bit values are truncated to 32 bits (and 81 * fortunately don't get interpreted as two arguments). 82 */ 83 #define ADDR(x) (u32)(unsigned long)(x) 84 85 #ifdef CONFIG_PPC64 86 #define OF_WORKAROUNDS 0 87 #else 88 #define OF_WORKAROUNDS of_workarounds 89 static int of_workarounds __prombss; 90 #endif 91 92 #define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */ 93 #define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */ 94 95 #define PROM_BUG() do { \ 96 prom_printf("kernel BUG at %s line 0x%x!\n", \ 97 __FILE__, __LINE__); \ 98 __builtin_trap(); \ 99 } while (0) 100 101 #ifdef DEBUG_PROM 102 #define prom_debug(x...) prom_printf(x) 103 #else 104 #define prom_debug(x...) 
do { } while (0) 105 #endif 106 107 108 typedef u32 prom_arg_t; 109 110 struct prom_args { 111 __be32 service; 112 __be32 nargs; 113 __be32 nret; 114 __be32 args[10]; 115 }; 116 117 struct prom_t { 118 ihandle root; 119 phandle chosen; 120 int cpu; 121 ihandle stdout; 122 ihandle mmumap; 123 ihandle memory; 124 }; 125 126 struct mem_map_entry { 127 __be64 base; 128 __be64 size; 129 }; 130 131 typedef __be32 cell_t; 132 133 extern void __start(unsigned long r3, unsigned long r4, unsigned long r5, 134 unsigned long r6, unsigned long r7, unsigned long r8, 135 unsigned long r9); 136 137 #ifdef CONFIG_PPC64 138 extern int enter_prom(struct prom_args *args, unsigned long entry); 139 #else 140 static inline int enter_prom(struct prom_args *args, unsigned long entry) 141 { 142 return ((int (*)(struct prom_args *))entry)(args); 143 } 144 #endif 145 146 extern void copy_and_flush(unsigned long dest, unsigned long src, 147 unsigned long size, unsigned long offset); 148 149 /* prom structure */ 150 static struct prom_t __prombss prom; 151 152 static unsigned long __prombss prom_entry; 153 154 static char __prombss of_stdout_device[256]; 155 static char __prombss prom_scratch[256]; 156 157 static unsigned long __prombss dt_header_start; 158 static unsigned long __prombss dt_struct_start, dt_struct_end; 159 static unsigned long __prombss dt_string_start, dt_string_end; 160 161 static unsigned long __prombss prom_initrd_start, prom_initrd_end; 162 163 #ifdef CONFIG_PPC64 164 static int __prombss prom_iommu_force_on; 165 static int __prombss prom_iommu_off; 166 static unsigned long __prombss prom_tce_alloc_start; 167 static unsigned long __prombss prom_tce_alloc_end; 168 #endif 169 170 #ifdef CONFIG_PPC_PSERIES 171 static bool __prombss prom_radix_disable; 172 static bool __prombss prom_xive_disable; 173 #endif 174 175 #ifdef CONFIG_PPC_SVM 176 static bool __prombss prom_svm_enable; 177 #endif 178 179 struct platform_support { 180 bool hash_mmu; 181 bool radix_mmu; 182 bool radix_gtse; 183 bool xive; 184 }; 185 186 /* Platforms codes are now obsolete in the kernel. Now only used within this 187 * file and ultimately gone too. Feel free to change them if you need, they 188 * are not shared with anything outside of this file anymore 189 */ 190 #define PLATFORM_PSERIES 0x0100 191 #define PLATFORM_PSERIES_LPAR 0x0101 192 #define PLATFORM_LPAR 0x0001 193 #define PLATFORM_POWERMAC 0x0400 194 #define PLATFORM_GENERIC 0x0500 195 196 static int __prombss of_platform; 197 198 static char __prombss prom_cmd_line[COMMAND_LINE_SIZE]; 199 200 static unsigned long __prombss prom_memory_limit; 201 202 static unsigned long __prombss alloc_top; 203 static unsigned long __prombss alloc_top_high; 204 static unsigned long __prombss alloc_bottom; 205 static unsigned long __prombss rmo_top; 206 static unsigned long __prombss ram_top; 207 208 static struct mem_map_entry __prombss mem_reserve_map[MEM_RESERVE_MAP_SIZE]; 209 static int __prombss mem_reserve_cnt; 210 211 static cell_t __prombss regbuf[1024]; 212 213 static bool __prombss rtas_has_query_cpu_stopped; 214 215 216 /* 217 * Error results ... some OF calls will return "-1" on error, some 218 * will return 0, some will return either. 
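 * (For example, a failed "finddevice" returns -1 while a failed "open"
 *  returns 0, so callers may need to check for both.)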
To simplify, here are 219 * macros to use with any ihandle or phandle return value to check if 220 * it is valid 221 */ 222 223 #define PROM_ERROR (-1u) 224 #define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR) 225 #define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR) 226 227 /* Copied from lib/string.c and lib/kstrtox.c */ 228 229 static int __init prom_strcmp(const char *cs, const char *ct) 230 { 231 unsigned char c1, c2; 232 233 while (1) { 234 c1 = *cs++; 235 c2 = *ct++; 236 if (c1 != c2) 237 return c1 < c2 ? -1 : 1; 238 if (!c1) 239 break; 240 } 241 return 0; 242 } 243 244 static char __init *prom_strcpy(char *dest, const char *src) 245 { 246 char *tmp = dest; 247 248 while ((*dest++ = *src++) != '\0') 249 /* nothing */; 250 return tmp; 251 } 252 253 static int __init prom_strncmp(const char *cs, const char *ct, size_t count) 254 { 255 unsigned char c1, c2; 256 257 while (count) { 258 c1 = *cs++; 259 c2 = *ct++; 260 if (c1 != c2) 261 return c1 < c2 ? -1 : 1; 262 if (!c1) 263 break; 264 count--; 265 } 266 return 0; 267 } 268 269 static size_t __init prom_strlen(const char *s) 270 { 271 const char *sc; 272 273 for (sc = s; *sc != '\0'; ++sc) 274 /* nothing */; 275 return sc - s; 276 } 277 278 static int __init prom_memcmp(const void *cs, const void *ct, size_t count) 279 { 280 const unsigned char *su1, *su2; 281 int res = 0; 282 283 for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--) 284 if ((res = *su1 - *su2) != 0) 285 break; 286 return res; 287 } 288 289 static char __init *prom_strstr(const char *s1, const char *s2) 290 { 291 size_t l1, l2; 292 293 l2 = prom_strlen(s2); 294 if (!l2) 295 return (char *)s1; 296 l1 = prom_strlen(s1); 297 while (l1 >= l2) { 298 l1--; 299 if (!prom_memcmp(s1, s2, l2)) 300 return (char *)s1; 301 s1++; 302 } 303 return NULL; 304 } 305 306 static size_t __init prom_strlcpy(char *dest, const char *src, size_t size) 307 { 308 size_t ret = prom_strlen(src); 309 310 if (size) { 311 size_t len = (ret >= size) ? size - 1 : ret; 312 memcpy(dest, src, len); 313 dest[len] = '\0'; 314 } 315 return ret; 316 } 317 318 #ifdef CONFIG_PPC_PSERIES 319 static int __init prom_strtobool(const char *s, bool *res) 320 { 321 if (!s) 322 return -EINVAL; 323 324 switch (s[0]) { 325 case 'y': 326 case 'Y': 327 case '1': 328 *res = true; 329 return 0; 330 case 'n': 331 case 'N': 332 case '0': 333 *res = false; 334 return 0; 335 case 'o': 336 case 'O': 337 switch (s[1]) { 338 case 'n': 339 case 'N': 340 *res = true; 341 return 0; 342 case 'f': 343 case 'F': 344 *res = false; 345 return 0; 346 default: 347 break; 348 } 349 default: 350 break; 351 } 352 353 return -EINVAL; 354 } 355 #endif 356 357 /* This is the one and *ONLY* place where we actually call open 358 * firmware. 359 */ 360 361 static int __init call_prom(const char *service, int nargs, int nret, ...) 362 { 363 int i; 364 struct prom_args args; 365 va_list list; 366 367 args.service = cpu_to_be32(ADDR(service)); 368 args.nargs = cpu_to_be32(nargs); 369 args.nret = cpu_to_be32(nret); 370 371 va_start(list, nret); 372 for (i = 0; i < nargs; i++) 373 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t)); 374 va_end(list); 375 376 for (i = 0; i < nret; i++) 377 args.args[nargs+i] = 0; 378 379 if (enter_prom(&args, prom_entry) < 0) 380 return PROM_ERROR; 381 382 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0; 383 } 384 385 static int __init call_prom_ret(const char *service, int nargs, int nret, 386 prom_arg_t *rets, ...) 
387 { 388 int i; 389 struct prom_args args; 390 va_list list; 391 392 args.service = cpu_to_be32(ADDR(service)); 393 args.nargs = cpu_to_be32(nargs); 394 args.nret = cpu_to_be32(nret); 395 396 va_start(list, rets); 397 for (i = 0; i < nargs; i++) 398 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t)); 399 va_end(list); 400 401 for (i = 0; i < nret; i++) 402 args.args[nargs+i] = 0; 403 404 if (enter_prom(&args, prom_entry) < 0) 405 return PROM_ERROR; 406 407 if (rets != NULL) 408 for (i = 1; i < nret; ++i) 409 rets[i-1] = be32_to_cpu(args.args[nargs+i]); 410 411 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0; 412 } 413 414 415 static void __init prom_print(const char *msg) 416 { 417 const char *p, *q; 418 419 if (prom.stdout == 0) 420 return; 421 422 for (p = msg; *p != 0; p = q) { 423 for (q = p; *q != 0 && *q != '\n'; ++q) 424 ; 425 if (q > p) 426 call_prom("write", 3, 1, prom.stdout, p, q - p); 427 if (*q == 0) 428 break; 429 ++q; 430 call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2); 431 } 432 } 433 434 435 /* 436 * Both prom_print_hex & prom_print_dec takes an unsigned long as input so that 437 * we do not need __udivdi3 or __umoddi3 on 32bits. 438 */ 439 static void __init prom_print_hex(unsigned long val) 440 { 441 int i, nibbles = sizeof(val)*2; 442 char buf[sizeof(val)*2+1]; 443 444 for (i = nibbles-1; i >= 0; i--) { 445 buf[i] = (val & 0xf) + '0'; 446 if (buf[i] > '9') 447 buf[i] += ('a'-'0'-10); 448 val >>= 4; 449 } 450 buf[nibbles] = '\0'; 451 call_prom("write", 3, 1, prom.stdout, buf, nibbles); 452 } 453 454 /* max number of decimal digits in an unsigned long */ 455 #define UL_DIGITS 21 456 static void __init prom_print_dec(unsigned long val) 457 { 458 int i, size; 459 char buf[UL_DIGITS+1]; 460 461 for (i = UL_DIGITS-1; i >= 0; i--) { 462 buf[i] = (val % 10) + '0'; 463 val = val/10; 464 if (val == 0) 465 break; 466 } 467 /* shift stuff down */ 468 size = UL_DIGITS - i; 469 call_prom("write", 3, 1, prom.stdout, buf+i, size); 470 } 471 472 __printf(1, 2) 473 static void __init prom_printf(const char *format, ...) 
474 { 475 const char *p, *q, *s; 476 va_list args; 477 unsigned long v; 478 long vs; 479 int n = 0; 480 481 va_start(args, format); 482 for (p = format; *p != 0; p = q) { 483 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q) 484 ; 485 if (q > p) 486 call_prom("write", 3, 1, prom.stdout, p, q - p); 487 if (*q == 0) 488 break; 489 if (*q == '\n') { 490 ++q; 491 call_prom("write", 3, 1, prom.stdout, 492 ADDR("\r\n"), 2); 493 continue; 494 } 495 ++q; 496 if (*q == 0) 497 break; 498 while (*q == 'l') { 499 ++q; 500 ++n; 501 } 502 switch (*q) { 503 case 's': 504 ++q; 505 s = va_arg(args, const char *); 506 prom_print(s); 507 break; 508 case 'x': 509 ++q; 510 switch (n) { 511 case 0: 512 v = va_arg(args, unsigned int); 513 break; 514 case 1: 515 v = va_arg(args, unsigned long); 516 break; 517 case 2: 518 default: 519 v = va_arg(args, unsigned long long); 520 break; 521 } 522 prom_print_hex(v); 523 break; 524 case 'u': 525 ++q; 526 switch (n) { 527 case 0: 528 v = va_arg(args, unsigned int); 529 break; 530 case 1: 531 v = va_arg(args, unsigned long); 532 break; 533 case 2: 534 default: 535 v = va_arg(args, unsigned long long); 536 break; 537 } 538 prom_print_dec(v); 539 break; 540 case 'd': 541 ++q; 542 switch (n) { 543 case 0: 544 vs = va_arg(args, int); 545 break; 546 case 1: 547 vs = va_arg(args, long); 548 break; 549 case 2: 550 default: 551 vs = va_arg(args, long long); 552 break; 553 } 554 if (vs < 0) { 555 prom_print("-"); 556 vs = -vs; 557 } 558 prom_print_dec(vs); 559 break; 560 } 561 } 562 va_end(args); 563 } 564 565 566 static unsigned int __init prom_claim(unsigned long virt, unsigned long size, 567 unsigned long align) 568 { 569 570 if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) { 571 /* 572 * Old OF requires we claim physical and virtual separately 573 * and then map explicitly (assuming virtual mode) 574 */ 575 int ret; 576 prom_arg_t result; 577 578 ret = call_prom_ret("call-method", 5, 2, &result, 579 ADDR("claim"), prom.memory, 580 align, size, virt); 581 if (ret != 0 || result == -1) 582 return -1; 583 ret = call_prom_ret("call-method", 5, 2, &result, 584 ADDR("claim"), prom.mmumap, 585 align, size, virt); 586 if (ret != 0) { 587 call_prom("call-method", 4, 1, ADDR("release"), 588 prom.memory, size, virt); 589 return -1; 590 } 591 /* the 0x12 is M (coherence) + PP == read/write */ 592 call_prom("call-method", 6, 1, 593 ADDR("map"), prom.mmumap, 0x12, size, virt, virt); 594 return virt; 595 } 596 return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size, 597 (prom_arg_t)align); 598 } 599 600 static void __init __attribute__((noreturn)) prom_panic(const char *reason) 601 { 602 prom_print(reason); 603 /* Do not call exit because it clears the screen on pmac 604 * it also causes some sort of double-fault on early pmacs */ 605 if (of_platform == PLATFORM_POWERMAC) 606 asm("trap\n"); 607 608 /* ToDo: should put up an SRC here on pSeries */ 609 call_prom("exit", 0, 0); 610 611 for (;;) /* should never get here */ 612 ; 613 } 614 615 616 static int __init prom_next_node(phandle *nodep) 617 { 618 phandle node; 619 620 if ((node = *nodep) != 0 621 && (*nodep = call_prom("child", 1, 1, node)) != 0) 622 return 1; 623 if ((*nodep = call_prom("peer", 1, 1, node)) != 0) 624 return 1; 625 for (;;) { 626 if ((node = call_prom("parent", 1, 1, node)) == 0) 627 return 0; 628 if ((*nodep = call_prom("peer", 1, 1, node)) != 0) 629 return 1; 630 } 631 } 632 633 static inline int __init prom_getprop(phandle node, const char *pname, 634 void *value, size_t valuelen) 635 { 636 return 
call_prom("getprop", 4, 1, node, ADDR(pname), 637 (u32)(unsigned long) value, (u32) valuelen); 638 } 639 640 static inline int __init prom_getproplen(phandle node, const char *pname) 641 { 642 return call_prom("getproplen", 2, 1, node, ADDR(pname)); 643 } 644 645 static void add_string(char **str, const char *q) 646 { 647 char *p = *str; 648 649 while (*q) 650 *p++ = *q++; 651 *p++ = ' '; 652 *str = p; 653 } 654 655 static char *tohex(unsigned int x) 656 { 657 static const char digits[] __initconst = "0123456789abcdef"; 658 static char result[9] __prombss; 659 int i; 660 661 result[8] = 0; 662 i = 8; 663 do { 664 --i; 665 result[i] = digits[x & 0xf]; 666 x >>= 4; 667 } while (x != 0 && i > 0); 668 return &result[i]; 669 } 670 671 static int __init prom_setprop(phandle node, const char *nodename, 672 const char *pname, void *value, size_t valuelen) 673 { 674 char cmd[256], *p; 675 676 if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL)) 677 return call_prom("setprop", 4, 1, node, ADDR(pname), 678 (u32)(unsigned long) value, (u32) valuelen); 679 680 /* gah... setprop doesn't work on longtrail, have to use interpret */ 681 p = cmd; 682 add_string(&p, "dev"); 683 add_string(&p, nodename); 684 add_string(&p, tohex((u32)(unsigned long) value)); 685 add_string(&p, tohex(valuelen)); 686 add_string(&p, tohex(ADDR(pname))); 687 add_string(&p, tohex(prom_strlen(pname))); 688 add_string(&p, "property"); 689 *p = 0; 690 return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd); 691 } 692 693 /* We can't use the standard versions because of relocation headaches. */ 694 #define isxdigit(c) (('0' <= (c) && (c) <= '9') \ 695 || ('a' <= (c) && (c) <= 'f') \ 696 || ('A' <= (c) && (c) <= 'F')) 697 698 #define isdigit(c) ('0' <= (c) && (c) <= '9') 699 #define islower(c) ('a' <= (c) && (c) <= 'z') 700 #define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c)) 701 702 static unsigned long prom_strtoul(const char *cp, const char **endp) 703 { 704 unsigned long result = 0, base = 10, value; 705 706 if (*cp == '0') { 707 base = 8; 708 cp++; 709 if (toupper(*cp) == 'X') { 710 cp++; 711 base = 16; 712 } 713 } 714 715 while (isxdigit(*cp) && 716 (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) { 717 result = result * base + value; 718 cp++; 719 } 720 721 if (endp) 722 *endp = cp; 723 724 return result; 725 } 726 727 static unsigned long prom_memparse(const char *ptr, const char **retptr) 728 { 729 unsigned long ret = prom_strtoul(ptr, retptr); 730 int shift = 0; 731 732 /* 733 * We can't use a switch here because GCC *may* generate a 734 * jump table which won't work, because we're not running at 735 * the address we're linked at. 
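 * (A jump table would contain absolute addresses fixed at link time,
 *  which are wrong until the kernel has been relocated, hence the chain
 *  of plain comparisons below.)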
736 */ 737 if ('G' == **retptr || 'g' == **retptr) 738 shift = 30; 739 740 if ('M' == **retptr || 'm' == **retptr) 741 shift = 20; 742 743 if ('K' == **retptr || 'k' == **retptr) 744 shift = 10; 745 746 if (shift) { 747 ret <<= shift; 748 (*retptr)++; 749 } 750 751 return ret; 752 } 753 754 /* 755 * Early parsing of the command line passed to the kernel, used for 756 * "mem=x" and the options that affect the iommu 757 */ 758 static void __init early_cmdline_parse(void) 759 { 760 const char *opt; 761 762 char *p; 763 int l = 0; 764 765 prom_cmd_line[0] = 0; 766 p = prom_cmd_line; 767 if ((long)prom.chosen > 0) 768 l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1); 769 if (IS_ENABLED(CONFIG_CMDLINE_BOOL) && (l <= 0 || p[0] == '\0')) /* dbl check */ 770 prom_strlcpy(prom_cmd_line, CONFIG_CMDLINE, sizeof(prom_cmd_line)); 771 prom_printf("command line: %s\n", prom_cmd_line); 772 773 #ifdef CONFIG_PPC64 774 opt = prom_strstr(prom_cmd_line, "iommu="); 775 if (opt) { 776 prom_printf("iommu opt is: %s\n", opt); 777 opt += 6; 778 while (*opt && *opt == ' ') 779 opt++; 780 if (!prom_strncmp(opt, "off", 3)) 781 prom_iommu_off = 1; 782 else if (!prom_strncmp(opt, "force", 5)) 783 prom_iommu_force_on = 1; 784 } 785 #endif 786 opt = prom_strstr(prom_cmd_line, "mem="); 787 if (opt) { 788 opt += 4; 789 prom_memory_limit = prom_memparse(opt, (const char **)&opt); 790 #ifdef CONFIG_PPC64 791 /* Align to 16 MB == size of ppc64 large page */ 792 prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000); 793 #endif 794 } 795 796 #ifdef CONFIG_PPC_PSERIES 797 prom_radix_disable = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT); 798 opt = prom_strstr(prom_cmd_line, "disable_radix"); 799 if (opt) { 800 opt += 13; 801 if (*opt && *opt == '=') { 802 bool val; 803 804 if (prom_strtobool(++opt, &val)) 805 prom_radix_disable = false; 806 else 807 prom_radix_disable = val; 808 } else 809 prom_radix_disable = true; 810 } 811 if (prom_radix_disable) 812 prom_debug("Radix disabled from cmdline\n"); 813 814 opt = prom_strstr(prom_cmd_line, "xive=off"); 815 if (opt) { 816 prom_xive_disable = true; 817 prom_debug("XIVE disabled from cmdline\n"); 818 } 819 #endif /* CONFIG_PPC_PSERIES */ 820 821 #ifdef CONFIG_PPC_SVM 822 opt = prom_strstr(prom_cmd_line, "svm="); 823 if (opt) { 824 bool val; 825 826 opt += sizeof("svm=") - 1; 827 if (!prom_strtobool(opt, &val)) 828 prom_svm_enable = val; 829 } 830 #endif /* CONFIG_PPC_SVM */ 831 } 832 833 #ifdef CONFIG_PPC_PSERIES 834 /* 835 * The architecture vector has an array of PVR mask/value pairs, 836 * followed by # option vectors - 1, followed by the option vectors. 837 * 838 * See prom.h for the definition of the bits specified in the 839 * architecture vector. 840 */ 841 842 /* Firmware expects the value to be n - 1, where n is the # of vectors */ 843 #define NUM_VECTORS(n) ((n) - 1) 844 845 /* 846 * Firmware expects 1 + n - 2, where n is the length of the option vector in 847 * bytes. The 1 accounts for the length byte itself, the - 2 .. ? 
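 * (Numerically it is just n - 1: e.g. for the 3-byte option_vector1 below
 *  this evaluates to 2.)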
848 */ 849 #define VECTOR_LENGTH(n) (1 + (n) - 2) 850 851 struct option_vector1 { 852 u8 byte1; 853 u8 arch_versions; 854 u8 arch_versions3; 855 } __packed; 856 857 struct option_vector2 { 858 u8 byte1; 859 __be16 reserved; 860 __be32 real_base; 861 __be32 real_size; 862 __be32 virt_base; 863 __be32 virt_size; 864 __be32 load_base; 865 __be32 min_rma; 866 __be32 min_load; 867 u8 min_rma_percent; 868 u8 max_pft_size; 869 } __packed; 870 871 struct option_vector3 { 872 u8 byte1; 873 u8 byte2; 874 } __packed; 875 876 struct option_vector4 { 877 u8 byte1; 878 u8 min_vp_cap; 879 } __packed; 880 881 struct option_vector5 { 882 u8 byte1; 883 u8 byte2; 884 u8 byte3; 885 u8 cmo; 886 u8 associativity; 887 u8 bin_opts; 888 u8 micro_checkpoint; 889 u8 reserved0; 890 __be32 max_cpus; 891 __be16 papr_level; 892 __be16 reserved1; 893 u8 platform_facilities; 894 u8 reserved2; 895 __be16 reserved3; 896 u8 subprocessors; 897 u8 byte22; 898 u8 intarch; 899 u8 mmu; 900 u8 hash_ext; 901 u8 radix_ext; 902 } __packed; 903 904 struct option_vector6 { 905 u8 reserved; 906 u8 secondary_pteg; 907 u8 os_name; 908 } __packed; 909 910 struct ibm_arch_vec { 911 struct { u32 mask, val; } pvrs[12]; 912 913 u8 num_vectors; 914 915 u8 vec1_len; 916 struct option_vector1 vec1; 917 918 u8 vec2_len; 919 struct option_vector2 vec2; 920 921 u8 vec3_len; 922 struct option_vector3 vec3; 923 924 u8 vec4_len; 925 struct option_vector4 vec4; 926 927 u8 vec5_len; 928 struct option_vector5 vec5; 929 930 u8 vec6_len; 931 struct option_vector6 vec6; 932 } __packed; 933 934 static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = { 935 .pvrs = { 936 { 937 .mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */ 938 .val = cpu_to_be32(0x003a0000), 939 }, 940 { 941 .mask = cpu_to_be32(0xffff0000), /* POWER6 */ 942 .val = cpu_to_be32(0x003e0000), 943 }, 944 { 945 .mask = cpu_to_be32(0xffff0000), /* POWER7 */ 946 .val = cpu_to_be32(0x003f0000), 947 }, 948 { 949 .mask = cpu_to_be32(0xffff0000), /* POWER8E */ 950 .val = cpu_to_be32(0x004b0000), 951 }, 952 { 953 .mask = cpu_to_be32(0xffff0000), /* POWER8NVL */ 954 .val = cpu_to_be32(0x004c0000), 955 }, 956 { 957 .mask = cpu_to_be32(0xffff0000), /* POWER8 */ 958 .val = cpu_to_be32(0x004d0000), 959 }, 960 { 961 .mask = cpu_to_be32(0xffff0000), /* POWER9 */ 962 .val = cpu_to_be32(0x004e0000), 963 }, 964 { 965 .mask = cpu_to_be32(0xffffffff), /* all 3.00-compliant */ 966 .val = cpu_to_be32(0x0f000005), 967 }, 968 { 969 .mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */ 970 .val = cpu_to_be32(0x0f000004), 971 }, 972 { 973 .mask = cpu_to_be32(0xffffffff), /* all 2.06-compliant */ 974 .val = cpu_to_be32(0x0f000003), 975 }, 976 { 977 .mask = cpu_to_be32(0xffffffff), /* all 2.05-compliant */ 978 .val = cpu_to_be32(0x0f000002), 979 }, 980 { 981 .mask = cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */ 982 .val = cpu_to_be32(0x0f000001), 983 }, 984 }, 985 986 .num_vectors = NUM_VECTORS(6), 987 988 .vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)), 989 .vec1 = { 990 .byte1 = 0, 991 .arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 | 992 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07, 993 .arch_versions3 = OV1_PPC_3_00, 994 }, 995 996 .vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)), 997 /* option vector 2: Open Firmware options supported */ 998 .vec2 = { 999 .byte1 = OV2_REAL_MODE, 1000 .reserved = 0, 1001 .real_base = cpu_to_be32(0xffffffff), 1002 .real_size = cpu_to_be32(0xffffffff), 1003 .virt_base = 
cpu_to_be32(0xffffffff), 1004 .virt_size = cpu_to_be32(0xffffffff), 1005 .load_base = cpu_to_be32(0xffffffff), 1006 .min_rma = cpu_to_be32(512), /* 512MB min RMA */ 1007 .min_load = cpu_to_be32(0xffffffff), /* full client load */ 1008 .min_rma_percent = 0, /* min RMA percentage of total RAM */ 1009 .max_pft_size = 48, /* max log_2(hash table size) */ 1010 }, 1011 1012 .vec3_len = VECTOR_LENGTH(sizeof(struct option_vector3)), 1013 /* option vector 3: processor options supported */ 1014 .vec3 = { 1015 .byte1 = 0, /* don't ignore, don't halt */ 1016 .byte2 = OV3_FP | OV3_VMX | OV3_DFP, 1017 }, 1018 1019 .vec4_len = VECTOR_LENGTH(sizeof(struct option_vector4)), 1020 /* option vector 4: IBM PAPR implementation */ 1021 .vec4 = { 1022 .byte1 = 0, /* don't halt */ 1023 .min_vp_cap = OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */ 1024 }, 1025 1026 .vec5_len = VECTOR_LENGTH(sizeof(struct option_vector5)), 1027 /* option vector 5: PAPR/OF options */ 1028 .vec5 = { 1029 .byte1 = 0, /* don't ignore, don't halt */ 1030 .byte2 = OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) | 1031 OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) | 1032 #ifdef CONFIG_PCI_MSI 1033 /* PCIe/MSI support. Without MSI full PCIe is not supported */ 1034 OV5_FEAT(OV5_MSI), 1035 #else 1036 0, 1037 #endif 1038 .byte3 = 0, 1039 .cmo = 1040 #ifdef CONFIG_PPC_SMLPAR 1041 OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO), 1042 #else 1043 0, 1044 #endif 1045 .associativity = OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN), 1046 .bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT), 1047 .micro_checkpoint = 0, 1048 .reserved0 = 0, 1049 .max_cpus = cpu_to_be32(NR_CPUS), /* number of cores supported */ 1050 .papr_level = 0, 1051 .reserved1 = 0, 1052 .platform_facilities = OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | OV5_FEAT(OV5_PFO_HW_842), 1053 .reserved2 = 0, 1054 .reserved3 = 0, 1055 .subprocessors = 1, 1056 .byte22 = OV5_FEAT(OV5_DRMEM_V2), 1057 .intarch = 0, 1058 .mmu = 0, 1059 .hash_ext = 0, 1060 .radix_ext = 0, 1061 }, 1062 1063 /* option vector 6: IBM PAPR hints */ 1064 .vec6_len = VECTOR_LENGTH(sizeof(struct option_vector6)), 1065 .vec6 = { 1066 .reserved = 0, 1067 .secondary_pteg = 0, 1068 .os_name = OV6_LINUX, 1069 }, 1070 }; 1071 1072 static struct ibm_arch_vec __prombss ibm_architecture_vec ____cacheline_aligned; 1073 1074 /* Old method - ELF header with PT_NOTE sections only works on BE */ 1075 #ifdef __BIG_ENDIAN__ 1076 static const struct fake_elf { 1077 Elf32_Ehdr elfhdr; 1078 Elf32_Phdr phdr[2]; 1079 struct chrpnote { 1080 u32 namesz; 1081 u32 descsz; 1082 u32 type; 1083 char name[8]; /* "PowerPC" */ 1084 struct chrpdesc { 1085 u32 real_mode; 1086 u32 real_base; 1087 u32 real_size; 1088 u32 virt_base; 1089 u32 virt_size; 1090 u32 load_base; 1091 } chrpdesc; 1092 } chrpnote; 1093 struct rpanote { 1094 u32 namesz; 1095 u32 descsz; 1096 u32 type; 1097 char name[24]; /* "IBM,RPA-Client-Config" */ 1098 struct rpadesc { 1099 u32 lpar_affinity; 1100 u32 min_rmo_size; 1101 u32 min_rmo_percent; 1102 u32 max_pft_size; 1103 u32 splpar; 1104 u32 min_load; 1105 u32 new_mem_def; 1106 u32 ignore_me; 1107 } rpadesc; 1108 } rpanote; 1109 } fake_elf __initconst = { 1110 .elfhdr = { 1111 .e_ident = { 0x7f, 'E', 'L', 'F', 1112 ELFCLASS32, ELFDATA2MSB, EV_CURRENT }, 1113 .e_type = ET_EXEC, /* yeah right */ 1114 .e_machine = EM_PPC, 1115 .e_version = EV_CURRENT, 1116 .e_phoff = offsetof(struct fake_elf, phdr), 1117 .e_phentsize = sizeof(Elf32_Phdr), 1118 .e_phnum = 2 1119 }, 1120 .phdr = { 
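		/* Two PT_NOTE program headers: one for the CHRP note, one for the RPA note */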
		[0] = {
			.p_type = PT_NOTE,
			.p_offset = offsetof(struct fake_elf, chrpnote),
			.p_filesz = sizeof(struct chrpnote)
		}, [1] = {
			.p_type = PT_NOTE,
			.p_offset = offsetof(struct fake_elf, rpanote),
			.p_filesz = sizeof(struct rpanote)
		}
	},
	.chrpnote = {
		.namesz = sizeof("PowerPC"),
		.descsz = sizeof(struct chrpdesc),
		.type = 0x1275,
		.name = "PowerPC",
		.chrpdesc = {
			.real_mode = ~0U,	/* ~0 means "don't care" */
			.real_base = ~0U,
			.real_size = ~0U,
			.virt_base = ~0U,
			.virt_size = ~0U,
			.load_base = ~0U
		},
	},
	.rpanote = {
		.namesz = sizeof("IBM,RPA-Client-Config"),
		.descsz = sizeof(struct rpadesc),
		.type = 0x12759999,
		.name = "IBM,RPA-Client-Config",
		.rpadesc = {
			.lpar_affinity = 0,
			.min_rmo_size = 64,	/* in megabytes */
			.min_rmo_percent = 0,
			.max_pft_size = 48,	/* 2^48 bytes max PFT size */
			.splpar = 1,
			.min_load = ~0U,
			.new_mem_def = 0
		}
	}
};
#endif /* __BIG_ENDIAN__ */

static int __init prom_count_smt_threads(void)
{
	phandle node;
	char type[64];
	unsigned int plen;

	/* Pick up the first CPU node we can find */
	for (node = 0; prom_next_node(&node); ) {
		type[0] = 0;
		prom_getprop(node, "device_type", type, sizeof(type));

		if (prom_strcmp(type, "cpu"))
			continue;
		/*
		 * There is an entry for each smt thread, each entry being
		 * 4 bytes long.  All cpus should have the same number of
		 * smt threads, so return after finding the first.
		 */
		plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
		if (plen == PROM_ERROR)
			break;
		plen >>= 2;
		prom_debug("Found %lu smt threads per core\n", (unsigned long)plen);

		/* Sanity check */
		if (plen < 1 || plen > 64) {
			prom_printf("Threads per core %lu out of bounds, assuming 1\n",
				    (unsigned long)plen);
			return 1;
		}
		return plen;
	}
	prom_debug("No threads found, assuming 1 per core\n");

	return 1;

}

static void __init prom_parse_mmu_model(u8 val,
					struct platform_support *support)
{
	switch (val) {
	case OV5_FEAT(OV5_MMU_DYNAMIC):
	case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */
		prom_debug("MMU - either supported\n");
		support->radix_mmu = !prom_radix_disable;
		support->hash_mmu = true;
		break;
	case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */
		prom_debug("MMU - radix only\n");
		if (prom_radix_disable) {
			/*
			 * If we __have__ to do radix, we're better off ignoring
			 * the command line rather than not booting.
1217 */ 1218 prom_printf("WARNING: Ignoring cmdline option disable_radix\n"); 1219 } 1220 support->radix_mmu = true; 1221 break; 1222 case OV5_FEAT(OV5_MMU_HASH): 1223 prom_debug("MMU - hash only\n"); 1224 support->hash_mmu = true; 1225 break; 1226 default: 1227 prom_debug("Unknown mmu support option: 0x%x\n", val); 1228 break; 1229 } 1230 } 1231 1232 static void __init prom_parse_xive_model(u8 val, 1233 struct platform_support *support) 1234 { 1235 switch (val) { 1236 case OV5_FEAT(OV5_XIVE_EITHER): /* Either Available */ 1237 prom_debug("XIVE - either mode supported\n"); 1238 support->xive = !prom_xive_disable; 1239 break; 1240 case OV5_FEAT(OV5_XIVE_EXPLOIT): /* Only Exploitation mode */ 1241 prom_debug("XIVE - exploitation mode supported\n"); 1242 if (prom_xive_disable) { 1243 /* 1244 * If we __have__ to do XIVE, we're better off ignoring 1245 * the command line rather than not booting. 1246 */ 1247 prom_printf("WARNING: Ignoring cmdline option xive=off\n"); 1248 } 1249 support->xive = true; 1250 break; 1251 case OV5_FEAT(OV5_XIVE_LEGACY): /* Only Legacy mode */ 1252 prom_debug("XIVE - legacy mode supported\n"); 1253 break; 1254 default: 1255 prom_debug("Unknown xive support option: 0x%x\n", val); 1256 break; 1257 } 1258 } 1259 1260 static void __init prom_parse_platform_support(u8 index, u8 val, 1261 struct platform_support *support) 1262 { 1263 switch (index) { 1264 case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */ 1265 prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support); 1266 break; 1267 case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */ 1268 if (val & OV5_FEAT(OV5_RADIX_GTSE)) { 1269 prom_debug("Radix - GTSE supported\n"); 1270 support->radix_gtse = true; 1271 } 1272 break; 1273 case OV5_INDX(OV5_XIVE_SUPPORT): /* Interrupt mode */ 1274 prom_parse_xive_model(val & OV5_FEAT(OV5_XIVE_SUPPORT), 1275 support); 1276 break; 1277 } 1278 } 1279 1280 static void __init prom_check_platform_support(void) 1281 { 1282 struct platform_support supported = { 1283 .hash_mmu = false, 1284 .radix_mmu = false, 1285 .radix_gtse = false, 1286 .xive = false 1287 }; 1288 int prop_len = prom_getproplen(prom.chosen, 1289 "ibm,arch-vec-5-platform-support"); 1290 1291 /* 1292 * First copy the architecture vec template 1293 * 1294 * use memcpy() instead of *vec = *vec_template so that GCC replaces it 1295 * by __memcpy() when KASAN is active 1296 */ 1297 memcpy(&ibm_architecture_vec, &ibm_architecture_vec_template, 1298 sizeof(ibm_architecture_vec)); 1299 1300 if (prop_len > 1) { 1301 int i; 1302 u8 vec[8]; 1303 prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n", 1304 prop_len); 1305 if (prop_len > sizeof(vec)) 1306 prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n", 1307 prop_len); 1308 prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", 1309 &vec, sizeof(vec)); 1310 for (i = 0; i < sizeof(vec); i += 2) { 1311 prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2 1312 , vec[i] 1313 , vec[i + 1]); 1314 prom_parse_platform_support(vec[i], vec[i + 1], 1315 &supported); 1316 } 1317 } 1318 1319 if (supported.radix_mmu && supported.radix_gtse && 1320 IS_ENABLED(CONFIG_PPC_RADIX_MMU)) { 1321 /* Radix preferred - but we require GTSE for now */ 1322 prom_debug("Asking for radix with GTSE\n"); 1323 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX); 1324 ibm_architecture_vec.vec5.radix_ext = OV5_FEAT(OV5_RADIX_GTSE); 1325 } else if (supported.hash_mmu) { 1326 /* Default to hash mmu (if we can) */ 1327 prom_debug("Asking for hash\n"); 1328 
ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH); 1329 } else { 1330 /* We're probably on a legacy hypervisor */ 1331 prom_debug("Assuming legacy hash support\n"); 1332 } 1333 1334 if (supported.xive) { 1335 prom_debug("Asking for XIVE\n"); 1336 ibm_architecture_vec.vec5.intarch = OV5_FEAT(OV5_XIVE_EXPLOIT); 1337 } 1338 } 1339 1340 static void __init prom_send_capabilities(void) 1341 { 1342 ihandle root; 1343 prom_arg_t ret; 1344 u32 cores; 1345 1346 /* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */ 1347 prom_check_platform_support(); 1348 1349 root = call_prom("open", 1, 1, ADDR("/")); 1350 if (root != 0) { 1351 /* We need to tell the FW about the number of cores we support. 1352 * 1353 * To do that, we count the number of threads on the first core 1354 * (we assume this is the same for all cores) and use it to 1355 * divide NR_CPUS. 1356 */ 1357 1358 cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads()); 1359 prom_printf("Max number of cores passed to firmware: %u (NR_CPUS = %d)\n", 1360 cores, NR_CPUS); 1361 1362 ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores); 1363 1364 /* try calling the ibm,client-architecture-support method */ 1365 prom_printf("Calling ibm,client-architecture-support..."); 1366 if (call_prom_ret("call-method", 3, 2, &ret, 1367 ADDR("ibm,client-architecture-support"), 1368 root, 1369 ADDR(&ibm_architecture_vec)) == 0) { 1370 /* the call exists... */ 1371 if (ret) 1372 prom_printf("\nWARNING: ibm,client-architecture" 1373 "-support call FAILED!\n"); 1374 call_prom("close", 1, 0, root); 1375 prom_printf(" done\n"); 1376 return; 1377 } 1378 call_prom("close", 1, 0, root); 1379 prom_printf(" not implemented\n"); 1380 } 1381 1382 #ifdef __BIG_ENDIAN__ 1383 { 1384 ihandle elfloader; 1385 1386 /* no ibm,client-architecture-support call, try the old way */ 1387 elfloader = call_prom("open", 1, 1, 1388 ADDR("/packages/elf-loader")); 1389 if (elfloader == 0) { 1390 prom_printf("couldn't open /packages/elf-loader\n"); 1391 return; 1392 } 1393 call_prom("call-method", 3, 1, ADDR("process-elf-header"), 1394 elfloader, ADDR(&fake_elf)); 1395 call_prom("close", 1, 0, elfloader); 1396 } 1397 #endif /* __BIG_ENDIAN__ */ 1398 } 1399 #endif /* CONFIG_PPC_PSERIES */ 1400 1401 /* 1402 * Memory allocation strategy... our layout is normally: 1403 * 1404 * at 14Mb or more we have vmlinux, then a gap and initrd. In some 1405 * rare cases, initrd might end up being before the kernel though. 1406 * We assume this won't override the final kernel at 0, we have no 1407 * provision to handle that in this version, but it should hopefully 1408 * never happen. 1409 * 1410 * alloc_top is set to the top of RMO, eventually shrink down if the 1411 * TCEs overlap 1412 * 1413 * alloc_bottom is set to the top of kernel/initrd 1414 * 1415 * from there, allocations are done this way : rtas is allocated 1416 * topmost, and the device-tree is allocated from the bottom. We try 1417 * to grow the device-tree allocation as we progress. If we can't, 1418 * then we fail, we don't currently have a facility to restart 1419 * elsewhere, but that shouldn't be necessary. 1420 * 1421 * Note that calls to reserve_mem have to be done explicitly, memory 1422 * allocated with either alloc_up or alloc_down isn't automatically 1423 * reserved. 
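 *
 * As a rough illustration: with vmlinux loaded around 14MB and the initrd
 * above it, alloc_bottom ends up just past the initrd, alloc_top starts at
 * rmo_top (capped to 768MB below), the flattened device-tree then grows
 * upward from alloc_bottom via alloc_up() while RTAS is claimed downward
 * from alloc_top via alloc_down().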
 */


/*
 * Allocates memory in the RMO upward from the kernel/initrd
 *
 * When align is 0, this is a special case, it means to allocate in place
 * at the current location of alloc_bottom or fail (that is basically
 * extending the previous allocation). Used for the device-tree flattening
 */
static unsigned long __init alloc_up(unsigned long size, unsigned long align)
{
	unsigned long base = alloc_bottom;
	unsigned long addr = 0;

	if (align)
		base = _ALIGN_UP(base, align);
	prom_debug("%s(%lx, %lx)\n", __func__, size, align);
	if (ram_top == 0)
		prom_panic("alloc_up() called with mem not initialized\n");

	if (align)
		base = _ALIGN_UP(alloc_bottom, align);
	else
		base = alloc_bottom;

	for(; (base + size) <= alloc_top;
	    base = _ALIGN_UP(base + 0x100000, align)) {
		prom_debug(" trying: 0x%lx\n\r", base);
		addr = (unsigned long)prom_claim(base, size, 0);
		if (addr != PROM_ERROR && addr != 0)
			break;
		addr = 0;
		if (align == 0)
			break;
	}
	if (addr == 0)
		return 0;
	alloc_bottom = addr + size;

	prom_debug(" -> %lx\n", addr);
	prom_debug(" alloc_bottom : %lx\n", alloc_bottom);
	prom_debug(" alloc_top    : %lx\n", alloc_top);
	prom_debug(" alloc_top_hi : %lx\n", alloc_top_high);
	prom_debug(" rmo_top      : %lx\n", rmo_top);
	prom_debug(" ram_top      : %lx\n", ram_top);

	return addr;
}

/*
 * Allocates memory downward, either from top of RMO, or if highmem
 * is set, from the top of RAM.  Note that this one doesn't handle
 * failures.  It does claim memory if highmem is not set.
 */
static unsigned long __init alloc_down(unsigned long size, unsigned long align,
				       int highmem)
{
	unsigned long base, addr = 0;

	prom_debug("%s(%lx, %lx, %s)\n", __func__, size, align,
		   highmem ? "(high)" : "(low)");
	if (ram_top == 0)
		prom_panic("alloc_down() called with mem not initialized\n");

	if (highmem) {
		/* Carve out storage for the TCE table. */
		addr = _ALIGN_DOWN(alloc_top_high - size, align);
		if (addr <= alloc_bottom)
			return 0;
		/* Will we bump into the RMO ? If yes, check out that we
		 * didn't overlap existing allocations there, if we did,
		 * we are dead, we must be the first in town !
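		 * (Concretely: a high allocation may only dip below rmo_top
		 *  while alloc_top still equals rmo_top, i.e. nothing has been
		 *  claimed downward from the top of the RMO yet.)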
1497 */ 1498 if (addr < rmo_top) { 1499 /* Good, we are first */ 1500 if (alloc_top == rmo_top) 1501 alloc_top = rmo_top = addr; 1502 else 1503 return 0; 1504 } 1505 alloc_top_high = addr; 1506 goto bail; 1507 } 1508 1509 base = _ALIGN_DOWN(alloc_top - size, align); 1510 for (; base > alloc_bottom; 1511 base = _ALIGN_DOWN(base - 0x100000, align)) { 1512 prom_debug(" trying: 0x%lx\n\r", base); 1513 addr = (unsigned long)prom_claim(base, size, 0); 1514 if (addr != PROM_ERROR && addr != 0) 1515 break; 1516 addr = 0; 1517 } 1518 if (addr == 0) 1519 return 0; 1520 alloc_top = addr; 1521 1522 bail: 1523 prom_debug(" -> %lx\n", addr); 1524 prom_debug(" alloc_bottom : %lx\n", alloc_bottom); 1525 prom_debug(" alloc_top : %lx\n", alloc_top); 1526 prom_debug(" alloc_top_hi : %lx\n", alloc_top_high); 1527 prom_debug(" rmo_top : %lx\n", rmo_top); 1528 prom_debug(" ram_top : %lx\n", ram_top); 1529 1530 return addr; 1531 } 1532 1533 /* 1534 * Parse a "reg" cell 1535 */ 1536 static unsigned long __init prom_next_cell(int s, cell_t **cellp) 1537 { 1538 cell_t *p = *cellp; 1539 unsigned long r = 0; 1540 1541 /* Ignore more than 2 cells */ 1542 while (s > sizeof(unsigned long) / 4) { 1543 p++; 1544 s--; 1545 } 1546 r = be32_to_cpu(*p++); 1547 #ifdef CONFIG_PPC64 1548 if (s > 1) { 1549 r <<= 32; 1550 r |= be32_to_cpu(*(p++)); 1551 } 1552 #endif 1553 *cellp = p; 1554 return r; 1555 } 1556 1557 /* 1558 * Very dumb function for adding to the memory reserve list, but 1559 * we don't need anything smarter at this point 1560 * 1561 * XXX Eventually check for collisions. They should NEVER happen. 1562 * If problems seem to show up, it would be a good start to track 1563 * them down. 1564 */ 1565 static void __init reserve_mem(u64 base, u64 size) 1566 { 1567 u64 top = base + size; 1568 unsigned long cnt = mem_reserve_cnt; 1569 1570 if (size == 0) 1571 return; 1572 1573 /* We need to always keep one empty entry so that we 1574 * have our terminator with "size" set to 0 since we are 1575 * dumb and just copy this entire array to the boot params 1576 */ 1577 base = _ALIGN_DOWN(base, PAGE_SIZE); 1578 top = _ALIGN_UP(top, PAGE_SIZE); 1579 size = top - base; 1580 1581 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1)) 1582 prom_panic("Memory reserve map exhausted !\n"); 1583 mem_reserve_map[cnt].base = cpu_to_be64(base); 1584 mem_reserve_map[cnt].size = cpu_to_be64(size); 1585 mem_reserve_cnt = cnt + 1; 1586 } 1587 1588 /* 1589 * Initialize memory allocation mechanism, parse "memory" nodes and 1590 * obtain that way the top of memory and RMO to setup out local allocator 1591 */ 1592 static void __init prom_init_mem(void) 1593 { 1594 phandle node; 1595 char type[64]; 1596 unsigned int plen; 1597 cell_t *p, *endp; 1598 __be32 val; 1599 u32 rac, rsc; 1600 1601 /* 1602 * We iterate the memory nodes to find 1603 * 1) top of RMO (first node) 1604 * 2) top of memory 1605 */ 1606 val = cpu_to_be32(2); 1607 prom_getprop(prom.root, "#address-cells", &val, sizeof(val)); 1608 rac = be32_to_cpu(val); 1609 val = cpu_to_be32(1); 1610 prom_getprop(prom.root, "#size-cells", &val, sizeof(rsc)); 1611 rsc = be32_to_cpu(val); 1612 prom_debug("root_addr_cells: %x\n", rac); 1613 prom_debug("root_size_cells: %x\n", rsc); 1614 1615 prom_debug("scanning memory:\n"); 1616 1617 for (node = 0; prom_next_node(&node); ) { 1618 type[0] = 0; 1619 prom_getprop(node, "device_type", type, sizeof(type)); 1620 1621 if (type[0] == 0) { 1622 /* 1623 * CHRP Longtrail machines have no device_type 1624 * on the memory node, so check the name instead... 
1625 */ 1626 prom_getprop(node, "name", type, sizeof(type)); 1627 } 1628 if (prom_strcmp(type, "memory")) 1629 continue; 1630 1631 plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf)); 1632 if (plen > sizeof(regbuf)) { 1633 prom_printf("memory node too large for buffer !\n"); 1634 plen = sizeof(regbuf); 1635 } 1636 p = regbuf; 1637 endp = p + (plen / sizeof(cell_t)); 1638 1639 #ifdef DEBUG_PROM 1640 memset(prom_scratch, 0, sizeof(prom_scratch)); 1641 call_prom("package-to-path", 3, 1, node, prom_scratch, 1642 sizeof(prom_scratch) - 1); 1643 prom_debug(" node %s :\n", prom_scratch); 1644 #endif /* DEBUG_PROM */ 1645 1646 while ((endp - p) >= (rac + rsc)) { 1647 unsigned long base, size; 1648 1649 base = prom_next_cell(rac, &p); 1650 size = prom_next_cell(rsc, &p); 1651 1652 if (size == 0) 1653 continue; 1654 prom_debug(" %lx %lx\n", base, size); 1655 if (base == 0 && (of_platform & PLATFORM_LPAR)) 1656 rmo_top = size; 1657 if ((base + size) > ram_top) 1658 ram_top = base + size; 1659 } 1660 } 1661 1662 alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000); 1663 1664 /* 1665 * If prom_memory_limit is set we reduce the upper limits *except* for 1666 * alloc_top_high. This must be the real top of RAM so we can put 1667 * TCE's up there. 1668 */ 1669 1670 alloc_top_high = ram_top; 1671 1672 if (prom_memory_limit) { 1673 if (prom_memory_limit <= alloc_bottom) { 1674 prom_printf("Ignoring mem=%lx <= alloc_bottom.\n", 1675 prom_memory_limit); 1676 prom_memory_limit = 0; 1677 } else if (prom_memory_limit >= ram_top) { 1678 prom_printf("Ignoring mem=%lx >= ram_top.\n", 1679 prom_memory_limit); 1680 prom_memory_limit = 0; 1681 } else { 1682 ram_top = prom_memory_limit; 1683 rmo_top = min(rmo_top, prom_memory_limit); 1684 } 1685 } 1686 1687 /* 1688 * Setup our top alloc point, that is top of RMO or top of 1689 * segment 0 when running non-LPAR. 1690 * Some RS64 machines have buggy firmware where claims up at 1691 * 1GB fail. Cap at 768MB as a workaround. 1692 * Since 768MB is plenty of room, and we need to cap to something 1693 * reasonable on 32-bit, cap at 768MB on all machines. 1694 */ 1695 if (!rmo_top) 1696 rmo_top = ram_top; 1697 rmo_top = min(0x30000000ul, rmo_top); 1698 alloc_top = rmo_top; 1699 alloc_top_high = ram_top; 1700 1701 /* 1702 * Check if we have an initrd after the kernel but still inside 1703 * the RMO. If we do move our bottom point to after it. 
1704 */ 1705 if (prom_initrd_start && 1706 prom_initrd_start < rmo_top && 1707 prom_initrd_end > alloc_bottom) 1708 alloc_bottom = PAGE_ALIGN(prom_initrd_end); 1709 1710 prom_printf("memory layout at init:\n"); 1711 prom_printf(" memory_limit : %lx (16 MB aligned)\n", 1712 prom_memory_limit); 1713 prom_printf(" alloc_bottom : %lx\n", alloc_bottom); 1714 prom_printf(" alloc_top : %lx\n", alloc_top); 1715 prom_printf(" alloc_top_hi : %lx\n", alloc_top_high); 1716 prom_printf(" rmo_top : %lx\n", rmo_top); 1717 prom_printf(" ram_top : %lx\n", ram_top); 1718 } 1719 1720 static void __init prom_close_stdin(void) 1721 { 1722 __be32 val; 1723 ihandle stdin; 1724 1725 if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) { 1726 stdin = be32_to_cpu(val); 1727 call_prom("close", 1, 0, stdin); 1728 } 1729 } 1730 1731 #ifdef CONFIG_PPC_SVM 1732 static int prom_rtas_hcall(uint64_t args) 1733 { 1734 register uint64_t arg1 asm("r3") = H_RTAS; 1735 register uint64_t arg2 asm("r4") = args; 1736 1737 asm volatile("sc 1\n" : "=r" (arg1) : 1738 "r" (arg1), 1739 "r" (arg2) :); 1740 return arg1; 1741 } 1742 1743 static struct rtas_args __prombss os_term_args; 1744 1745 static void __init prom_rtas_os_term(char *str) 1746 { 1747 phandle rtas_node; 1748 __be32 val; 1749 u32 token; 1750 1751 prom_debug("%s: start...\n", __func__); 1752 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas")); 1753 prom_debug("rtas_node: %x\n", rtas_node); 1754 if (!PHANDLE_VALID(rtas_node)) 1755 return; 1756 1757 val = 0; 1758 prom_getprop(rtas_node, "ibm,os-term", &val, sizeof(val)); 1759 token = be32_to_cpu(val); 1760 prom_debug("ibm,os-term: %x\n", token); 1761 if (token == 0) 1762 prom_panic("Could not get token for ibm,os-term\n"); 1763 os_term_args.token = cpu_to_be32(token); 1764 prom_rtas_hcall((uint64_t)&os_term_args); 1765 } 1766 #endif /* CONFIG_PPC_SVM */ 1767 1768 /* 1769 * Allocate room for and instantiate RTAS 1770 */ 1771 static void __init prom_instantiate_rtas(void) 1772 { 1773 phandle rtas_node; 1774 ihandle rtas_inst; 1775 u32 base, entry = 0; 1776 __be32 val; 1777 u32 size = 0; 1778 1779 prom_debug("prom_instantiate_rtas: start...\n"); 1780 1781 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas")); 1782 prom_debug("rtas_node: %x\n", rtas_node); 1783 if (!PHANDLE_VALID(rtas_node)) 1784 return; 1785 1786 val = 0; 1787 prom_getprop(rtas_node, "rtas-size", &val, sizeof(size)); 1788 size = be32_to_cpu(val); 1789 if (size == 0) 1790 return; 1791 1792 base = alloc_down(size, PAGE_SIZE, 0); 1793 if (base == 0) 1794 prom_panic("Could not allocate memory for RTAS\n"); 1795 1796 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas")); 1797 if (!IHANDLE_VALID(rtas_inst)) { 1798 prom_printf("opening rtas package failed (%x)\n", rtas_inst); 1799 return; 1800 } 1801 1802 prom_printf("instantiating rtas at 0x%x...", base); 1803 1804 if (call_prom_ret("call-method", 3, 2, &entry, 1805 ADDR("instantiate-rtas"), 1806 rtas_inst, base) != 0 1807 || entry == 0) { 1808 prom_printf(" failed\n"); 1809 return; 1810 } 1811 prom_printf(" done\n"); 1812 1813 reserve_mem(base, size); 1814 1815 val = cpu_to_be32(base); 1816 prom_setprop(rtas_node, "/rtas", "linux,rtas-base", 1817 &val, sizeof(val)); 1818 val = cpu_to_be32(entry); 1819 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry", 1820 &val, sizeof(val)); 1821 1822 /* Check if it supports "query-cpu-stopped-state" */ 1823 if (prom_getprop(rtas_node, "query-cpu-stopped-state", 1824 &val, sizeof(val)) != PROM_ERROR) 1825 rtas_has_query_cpu_stopped = true; 1826 1827 
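	/* The RTAS image claimed above stays resident; the kernel's RTAS layer
	 * finds it again through the linux,rtas-base and linux,rtas-entry
	 * properties recorded above.
	 */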
prom_debug("rtas base = 0x%x\n", base); 1828 prom_debug("rtas entry = 0x%x\n", entry); 1829 prom_debug("rtas size = 0x%x\n", size); 1830 1831 prom_debug("prom_instantiate_rtas: end...\n"); 1832 } 1833 1834 #ifdef CONFIG_PPC64 1835 /* 1836 * Allocate room for and instantiate Stored Measurement Log (SML) 1837 */ 1838 static void __init prom_instantiate_sml(void) 1839 { 1840 phandle ibmvtpm_node; 1841 ihandle ibmvtpm_inst; 1842 u32 entry = 0, size = 0, succ = 0; 1843 u64 base; 1844 __be32 val; 1845 1846 prom_debug("prom_instantiate_sml: start...\n"); 1847 1848 ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm")); 1849 prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node); 1850 if (!PHANDLE_VALID(ibmvtpm_node)) 1851 return; 1852 1853 ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm")); 1854 if (!IHANDLE_VALID(ibmvtpm_inst)) { 1855 prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst); 1856 return; 1857 } 1858 1859 if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported", 1860 &val, sizeof(val)) != PROM_ERROR) { 1861 if (call_prom_ret("call-method", 2, 2, &succ, 1862 ADDR("reformat-sml-to-efi-alignment"), 1863 ibmvtpm_inst) != 0 || succ == 0) { 1864 prom_printf("Reformat SML to EFI alignment failed\n"); 1865 return; 1866 } 1867 1868 if (call_prom_ret("call-method", 2, 2, &size, 1869 ADDR("sml-get-allocated-size"), 1870 ibmvtpm_inst) != 0 || size == 0) { 1871 prom_printf("SML get allocated size failed\n"); 1872 return; 1873 } 1874 } else { 1875 if (call_prom_ret("call-method", 2, 2, &size, 1876 ADDR("sml-get-handover-size"), 1877 ibmvtpm_inst) != 0 || size == 0) { 1878 prom_printf("SML get handover size failed\n"); 1879 return; 1880 } 1881 } 1882 1883 base = alloc_down(size, PAGE_SIZE, 0); 1884 if (base == 0) 1885 prom_panic("Could not allocate memory for sml\n"); 1886 1887 prom_printf("instantiating sml at 0x%llx...", base); 1888 1889 memset((void *)base, 0, size); 1890 1891 if (call_prom_ret("call-method", 4, 2, &entry, 1892 ADDR("sml-handover"), 1893 ibmvtpm_inst, size, base) != 0 || entry == 0) { 1894 prom_printf("SML handover failed\n"); 1895 return; 1896 } 1897 prom_printf(" done\n"); 1898 1899 reserve_mem(base, size); 1900 1901 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base", 1902 &base, sizeof(base)); 1903 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size", 1904 &size, sizeof(size)); 1905 1906 prom_debug("sml base = 0x%llx\n", base); 1907 prom_debug("sml size = 0x%x\n", size); 1908 1909 prom_debug("prom_instantiate_sml: end...\n"); 1910 } 1911 1912 /* 1913 * Allocate room for and initialize TCE tables 1914 */ 1915 #ifdef __BIG_ENDIAN__ 1916 static void __init prom_initialize_tce_table(void) 1917 { 1918 phandle node; 1919 ihandle phb_node; 1920 char compatible[64], type[64], model[64]; 1921 char *path = prom_scratch; 1922 u64 base, align; 1923 u32 minalign, minsize; 1924 u64 tce_entry, *tce_entryp; 1925 u64 local_alloc_top, local_alloc_bottom; 1926 u64 i; 1927 1928 if (prom_iommu_off) 1929 return; 1930 1931 prom_debug("starting prom_initialize_tce_table\n"); 1932 1933 /* Cache current top of allocs so we reserve a single block */ 1934 local_alloc_top = alloc_top_high; 1935 local_alloc_bottom = local_alloc_top; 1936 1937 /* Search all nodes looking for PHBs. 
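 * Only the legacy IBM host bridges (Python, Speedwagon, Winnipeg) are
 * matched below; other PHBs are skipped here and left to the kernel's
 * normal IOMMU setup.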
*/ 1938 for (node = 0; prom_next_node(&node); ) { 1939 compatible[0] = 0; 1940 type[0] = 0; 1941 model[0] = 0; 1942 prom_getprop(node, "compatible", 1943 compatible, sizeof(compatible)); 1944 prom_getprop(node, "device_type", type, sizeof(type)); 1945 prom_getprop(node, "model", model, sizeof(model)); 1946 1947 if ((type[0] == 0) || (prom_strstr(type, "pci") == NULL)) 1948 continue; 1949 1950 /* Keep the old logic intact to avoid regression. */ 1951 if (compatible[0] != 0) { 1952 if ((prom_strstr(compatible, "python") == NULL) && 1953 (prom_strstr(compatible, "Speedwagon") == NULL) && 1954 (prom_strstr(compatible, "Winnipeg") == NULL)) 1955 continue; 1956 } else if (model[0] != 0) { 1957 if ((prom_strstr(model, "ython") == NULL) && 1958 (prom_strstr(model, "peedwagon") == NULL) && 1959 (prom_strstr(model, "innipeg") == NULL)) 1960 continue; 1961 } 1962 1963 if (prom_getprop(node, "tce-table-minalign", &minalign, 1964 sizeof(minalign)) == PROM_ERROR) 1965 minalign = 0; 1966 if (prom_getprop(node, "tce-table-minsize", &minsize, 1967 sizeof(minsize)) == PROM_ERROR) 1968 minsize = 4UL << 20; 1969 1970 /* 1971 * Even though we read what OF wants, we just set the table 1972 * size to 4 MB. This is enough to map 2GB of PCI DMA space. 1973 * By doing this, we avoid the pitfalls of trying to DMA to 1974 * MMIO space and the DMA alias hole. 1975 */ 1976 minsize = 4UL << 20; 1977 1978 /* Align to the greater of the align or size */ 1979 align = max(minalign, minsize); 1980 base = alloc_down(minsize, align, 1); 1981 if (base == 0) 1982 prom_panic("ERROR, cannot find space for TCE table.\n"); 1983 if (base < local_alloc_bottom) 1984 local_alloc_bottom = base; 1985 1986 /* It seems OF doesn't null-terminate the path :-( */ 1987 memset(path, 0, sizeof(prom_scratch)); 1988 /* Call OF to setup the TCE hardware */ 1989 if (call_prom("package-to-path", 3, 1, node, 1990 path, sizeof(prom_scratch) - 1) == PROM_ERROR) { 1991 prom_printf("package-to-path failed\n"); 1992 } 1993 1994 /* Save away the TCE table attributes for later use. */ 1995 prom_setprop(node, path, "linux,tce-base", &base, sizeof(base)); 1996 prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize)); 1997 1998 prom_debug("TCE table: %s\n", path); 1999 prom_debug("\tnode = 0x%x\n", node); 2000 prom_debug("\tbase = 0x%llx\n", base); 2001 prom_debug("\tsize = 0x%x\n", minsize); 2002 2003 /* Initialize the table to have a one-to-one mapping 2004 * over the allocated size. 2005 */ 2006 tce_entryp = (u64 *)base; 2007 for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) { 2008 tce_entry = (i << PAGE_SHIFT); 2009 tce_entry |= 0x3; 2010 *tce_entryp = tce_entry; 2011 } 2012 2013 prom_printf("opening PHB %s", path); 2014 phb_node = call_prom("open", 1, 1, path); 2015 if (phb_node == 0) 2016 prom_printf("... failed\n"); 2017 else 2018 prom_printf("... done\n"); 2019 2020 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"), 2021 phb_node, -1, minsize, 2022 (u32) base, (u32) (base >> 32)); 2023 call_prom("close", 1, 0, phb_node); 2024 } 2025 2026 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom); 2027 2028 /* These are only really needed if there is a memory limit in 2029 * effect, but we don't know so export them always. 
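 * (They are later exported as the linux,tce-alloc-start and
 *  linux,tce-alloc-end properties under /chosen.)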
*/ 2030 prom_tce_alloc_start = local_alloc_bottom; 2031 prom_tce_alloc_end = local_alloc_top; 2032 2033 /* Flag the first invalid entry */ 2034 prom_debug("ending prom_initialize_tce_table\n"); 2035 } 2036 #endif /* __BIG_ENDIAN__ */ 2037 #endif /* CONFIG_PPC64 */ 2038 2039 /* 2040 * With CHRP SMP we need to use the OF to start the other processors. 2041 * We can't wait until smp_boot_cpus (the OF is trashed by then) 2042 * so we have to put the processors into a holding pattern controlled 2043 * by the kernel (not OF) before we destroy the OF. 2044 * 2045 * This uses a chunk of low memory, puts some holding pattern 2046 * code there and sends the other processors off to there until 2047 * smp_boot_cpus tells them to do something. The holding pattern 2048 * checks that address until its cpu # is there, when it is that 2049 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care 2050 * of setting those values. 2051 * 2052 * We also use physical address 0x4 here to tell when a cpu 2053 * is in its holding pattern code. 2054 * 2055 * -- Cort 2056 */ 2057 /* 2058 * We want to reference the copy of __secondary_hold_* in the 2059 * 0 - 0x100 address range 2060 */ 2061 #define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff) 2062 2063 static void __init prom_hold_cpus(void) 2064 { 2065 unsigned long i; 2066 phandle node; 2067 char type[64]; 2068 unsigned long *spinloop 2069 = (void *) LOW_ADDR(__secondary_hold_spinloop); 2070 unsigned long *acknowledge 2071 = (void *) LOW_ADDR(__secondary_hold_acknowledge); 2072 unsigned long secondary_hold = LOW_ADDR(__secondary_hold); 2073 2074 /* 2075 * On pseries, if RTAS supports "query-cpu-stopped-state", 2076 * we skip this stage, the CPUs will be started by the 2077 * kernel using RTAS. 2078 */ 2079 if ((of_platform == PLATFORM_PSERIES || 2080 of_platform == PLATFORM_PSERIES_LPAR) && 2081 rtas_has_query_cpu_stopped) { 2082 prom_printf("prom_hold_cpus: skipped\n"); 2083 return; 2084 } 2085 2086 prom_debug("prom_hold_cpus: start...\n"); 2087 prom_debug(" 1) spinloop = 0x%lx\n", (unsigned long)spinloop); 2088 prom_debug(" 1) *spinloop = 0x%lx\n", *spinloop); 2089 prom_debug(" 1) acknowledge = 0x%lx\n", 2090 (unsigned long)acknowledge); 2091 prom_debug(" 1) *acknowledge = 0x%lx\n", *acknowledge); 2092 prom_debug(" 1) secondary_hold = 0x%lx\n", secondary_hold); 2093 2094 /* Set the common spinloop variable, so all of the secondary cpus 2095 * will block when they are awakened from their OF spinloop. 2096 * This must occur for both SMP and non SMP kernels, since OF will 2097 * be trashed when we move the kernel. 2098 */ 2099 *spinloop = 0; 2100 2101 /* look for cpus */ 2102 for (node = 0; prom_next_node(&node); ) { 2103 unsigned int cpu_no; 2104 __be32 reg; 2105 2106 type[0] = 0; 2107 prom_getprop(node, "device_type", type, sizeof(type)); 2108 if (prom_strcmp(type, "cpu") != 0) 2109 continue; 2110 2111 /* Skip non-configured cpus. */ 2112 if (prom_getprop(node, "status", type, sizeof(type)) > 0) 2113 if (prom_strcmp(type, "okay") != 0) 2114 continue; 2115 2116 reg = cpu_to_be32(-1); /* make sparse happy */ 2117 prom_getprop(node, "reg", ®, sizeof(reg)); 2118 cpu_no = be32_to_cpu(reg); 2119 2120 prom_debug("cpu hw idx = %u\n", cpu_no); 2121 2122 /* Init the acknowledge var which will be reset by 2123 * the secondary cpu when it awakens from its OF 2124 * spinloop. 2125 */ 2126 *acknowledge = (unsigned long)-1; 2127 2128 if (cpu_no != prom.cpu) { 2129 /* Primary Thread of non-boot cpu or any thread */ 2130 prom_printf("starting cpu hw idx %u... 
", cpu_no); 2131 call_prom("start-cpu", 3, 0, node, 2132 secondary_hold, cpu_no); 2133 2134 for (i = 0; (i < 100000000) && 2135 (*acknowledge == ((unsigned long)-1)); i++ ) 2136 mb(); 2137 2138 if (*acknowledge == cpu_no) 2139 prom_printf("done\n"); 2140 else 2141 prom_printf("failed: %lx\n", *acknowledge); 2142 } 2143 #ifdef CONFIG_SMP 2144 else 2145 prom_printf("boot cpu hw idx %u\n", cpu_no); 2146 #endif /* CONFIG_SMP */ 2147 } 2148 2149 prom_debug("prom_hold_cpus: end...\n"); 2150 } 2151 2152 2153 static void __init prom_init_client_services(unsigned long pp) 2154 { 2155 /* Get a handle to the prom entry point before anything else */ 2156 prom_entry = pp; 2157 2158 /* get a handle for the stdout device */ 2159 prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen")); 2160 if (!PHANDLE_VALID(prom.chosen)) 2161 prom_panic("cannot find chosen"); /* msg won't be printed :( */ 2162 2163 /* get device tree root */ 2164 prom.root = call_prom("finddevice", 1, 1, ADDR("/")); 2165 if (!PHANDLE_VALID(prom.root)) 2166 prom_panic("cannot find device tree root"); /* msg won't be printed :( */ 2167 2168 prom.mmumap = 0; 2169 } 2170 2171 #ifdef CONFIG_PPC32 2172 /* 2173 * For really old powermacs, we need to map things we claim. 2174 * For that, we need the ihandle of the mmu. 2175 * Also, on the longtrail, we need to work around other bugs. 2176 */ 2177 static void __init prom_find_mmu(void) 2178 { 2179 phandle oprom; 2180 char version[64]; 2181 2182 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom")); 2183 if (!PHANDLE_VALID(oprom)) 2184 return; 2185 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0) 2186 return; 2187 version[sizeof(version) - 1] = 0; 2188 /* XXX might need to add other versions here */ 2189 if (prom_strcmp(version, "Open Firmware, 1.0.5") == 0) 2190 of_workarounds = OF_WA_CLAIM; 2191 else if (prom_strncmp(version, "FirmWorks,3.", 12) == 0) { 2192 of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL; 2193 call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim"); 2194 } else 2195 return; 2196 prom.memory = call_prom("open", 1, 1, ADDR("/memory")); 2197 prom_getprop(prom.chosen, "mmu", &prom.mmumap, 2198 sizeof(prom.mmumap)); 2199 prom.mmumap = be32_to_cpu(prom.mmumap); 2200 if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap)) 2201 of_workarounds &= ~OF_WA_CLAIM; /* hmmm */ 2202 } 2203 #else 2204 #define prom_find_mmu() 2205 #endif 2206 2207 static void __init prom_init_stdout(void) 2208 { 2209 char *path = of_stdout_device; 2210 char type[16]; 2211 phandle stdout_node; 2212 __be32 val; 2213 2214 if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0) 2215 prom_panic("cannot find stdout"); 2216 2217 prom.stdout = be32_to_cpu(val); 2218 2219 /* Get the full OF pathname of the stdout device */ 2220 memset(path, 0, 256); 2221 call_prom("instance-to-path", 3, 1, prom.stdout, path, 255); 2222 prom_printf("OF stdout device is: %s\n", of_stdout_device); 2223 prom_setprop(prom.chosen, "/chosen", "linux,stdout-path", 2224 path, prom_strlen(path) + 1); 2225 2226 /* instance-to-package fails on PA-Semi */ 2227 stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout); 2228 if (stdout_node != PROM_ERROR) { 2229 val = cpu_to_be32(stdout_node); 2230 2231 /* If it's a display, note it */ 2232 memset(type, 0, sizeof(type)); 2233 prom_getprop(stdout_node, "device_type", type, sizeof(type)); 2234 if (prom_strcmp(type, "display") == 0) 2235 prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0); 2236 } 2237 } 2238 2239 static int __init 
prom_find_machine_type(void) 2240 { 2241 char compat[256]; 2242 int len, i = 0; 2243 #ifdef CONFIG_PPC64 2244 phandle rtas; 2245 int x; 2246 #endif 2247 2248 /* Look for a PowerMac or a Cell */ 2249 len = prom_getprop(prom.root, "compatible", 2250 compat, sizeof(compat)-1); 2251 if (len > 0) { 2252 compat[len] = 0; 2253 while (i < len) { 2254 char *p = &compat[i]; 2255 int sl = prom_strlen(p); 2256 if (sl == 0) 2257 break; 2258 if (prom_strstr(p, "Power Macintosh") || 2259 prom_strstr(p, "MacRISC")) 2260 return PLATFORM_POWERMAC; 2261 #ifdef CONFIG_PPC64 2262 /* We must make sure we don't detect the IBM Cell 2263 * blades as pSeries due to some firmware issues, 2264 * so we do it here. 2265 */ 2266 if (prom_strstr(p, "IBM,CBEA") || 2267 prom_strstr(p, "IBM,CPBW-1.0")) 2268 return PLATFORM_GENERIC; 2269 #endif /* CONFIG_PPC64 */ 2270 i += sl + 1; 2271 } 2272 } 2273 #ifdef CONFIG_PPC64 2274 /* Try to figure out if it's an IBM pSeries or any other 2275 * PAPR compliant platform. We assume it is if: 2276 * - /device_type is "chrp" (please, do NOT use that for future 2277 * non-IBM designs !) 2278 * - it has /rtas 2279 */ 2280 len = prom_getprop(prom.root, "device_type", 2281 compat, sizeof(compat)-1); 2282 if (len <= 0) 2283 return PLATFORM_GENERIC; 2284 if (prom_strcmp(compat, "chrp")) 2285 return PLATFORM_GENERIC; 2286 2287 /* Default to pSeries. We need to know if we are running LPAR */ 2288 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas")); 2289 if (!PHANDLE_VALID(rtas)) 2290 return PLATFORM_GENERIC; 2291 x = prom_getproplen(rtas, "ibm,hypertas-functions"); 2292 if (x != PROM_ERROR) { 2293 prom_debug("Hypertas detected, assuming LPAR !\n"); 2294 return PLATFORM_PSERIES_LPAR; 2295 } 2296 return PLATFORM_PSERIES; 2297 #else 2298 return PLATFORM_GENERIC; 2299 #endif 2300 } 2301 2302 static int __init prom_set_color(ihandle ih, int i, int r, int g, int b) 2303 { 2304 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r); 2305 } 2306 2307 /* 2308 * If we have a display that we don't know how to drive, 2309 * we will want to try to execute OF's open method for it 2310 * later. However, OF will probably fall over if we do that 2311 * after we've taken over the MMU. 2312 * So we check whether we will need to open the display, 2313 * and if so, open it now.
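 * prom_check_displays() below walks every node whose device_type is
 * "display", opens it, tags it with a "linux,opened" property and programs
 * a default 16-entry colour table through the firmware "color!" method.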
2314 */ 2315 static void __init prom_check_displays(void) 2316 { 2317 char type[16], *path; 2318 phandle node; 2319 ihandle ih; 2320 int i; 2321 2322 static const unsigned char default_colors[] __initconst = { 2323 0x00, 0x00, 0x00, 2324 0x00, 0x00, 0xaa, 2325 0x00, 0xaa, 0x00, 2326 0x00, 0xaa, 0xaa, 2327 0xaa, 0x00, 0x00, 2328 0xaa, 0x00, 0xaa, 2329 0xaa, 0xaa, 0x00, 2330 0xaa, 0xaa, 0xaa, 2331 0x55, 0x55, 0x55, 2332 0x55, 0x55, 0xff, 2333 0x55, 0xff, 0x55, 2334 0x55, 0xff, 0xff, 2335 0xff, 0x55, 0x55, 2336 0xff, 0x55, 0xff, 2337 0xff, 0xff, 0x55, 2338 0xff, 0xff, 0xff 2339 }; 2340 const unsigned char *clut; 2341 2342 prom_debug("Looking for displays\n"); 2343 for (node = 0; prom_next_node(&node); ) { 2344 memset(type, 0, sizeof(type)); 2345 prom_getprop(node, "device_type", type, sizeof(type)); 2346 if (prom_strcmp(type, "display") != 0) 2347 continue; 2348 2349 /* It seems OF doesn't null-terminate the path :-( */ 2350 path = prom_scratch; 2351 memset(path, 0, sizeof(prom_scratch)); 2352 2353 /* 2354 * leave some room at the end of the path for appending extra 2355 * arguments 2356 */ 2357 if (call_prom("package-to-path", 3, 1, node, path, 2358 sizeof(prom_scratch) - 10) == PROM_ERROR) 2359 continue; 2360 prom_printf("found display : %s, opening... ", path); 2361 2362 ih = call_prom("open", 1, 1, path); 2363 if (ih == 0) { 2364 prom_printf("failed\n"); 2365 continue; 2366 } 2367 2368 /* Success */ 2369 prom_printf("done\n"); 2370 prom_setprop(node, path, "linux,opened", NULL, 0); 2371 2372 /* Setup a usable color table when the appropriate 2373 * method is available. Should update this to set-colors */ 2374 clut = default_colors; 2375 for (i = 0; i < 16; i++, clut += 3) 2376 if (prom_set_color(ih, i, clut[0], clut[1], 2377 clut[2]) != 0) 2378 break; 2379 2380 #ifdef CONFIG_LOGO_LINUX_CLUT224 2381 clut = PTRRELOC(logo_linux_clut224.clut); 2382 for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3) 2383 if (prom_set_color(ih, i + 32, clut[0], clut[1], 2384 clut[2]) != 0) 2385 break; 2386 #endif /* CONFIG_LOGO_LINUX_CLUT224 */ 2387 2388 #ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX 2389 if (prom_getprop(node, "linux,boot-display", NULL, 0) != 2390 PROM_ERROR) { 2391 u32 width, height, pitch, addr; 2392 2393 prom_printf("Setting btext !\n"); 2394 prom_getprop(node, "width", &width, 4); 2395 prom_getprop(node, "height", &height, 4); 2396 prom_getprop(node, "linebytes", &pitch, 4); 2397 prom_getprop(node, "address", &addr, 4); 2398 prom_printf("W=%d H=%d LB=%d addr=0x%x\n", 2399 width, height, pitch, addr); 2400 btext_setup_display(width, height, 8, pitch, addr); 2401 btext_prepare_BAT(); 2402 } 2403 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */ 2404 } 2405 } 2406 2407 2408 /* Return (relocated) pointer to this much memory: moves initrd if reqd. 
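 * A typical call, taken from flatten_device_tree() below:
 *
 *	hdr = make_room(&mem_start, &mem_end, sizeof(struct boot_param_header), 4);
 *
 * When the current window is exhausted, another chunk (capped at
 * DEVTREE_CHUNK_SIZE) is claimed through alloc_up().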
*/ 2409 static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end, 2410 unsigned long needed, unsigned long align) 2411 { 2412 void *ret; 2413 2414 *mem_start = _ALIGN(*mem_start, align); 2415 while ((*mem_start + needed) > *mem_end) { 2416 unsigned long room, chunk; 2417 2418 prom_debug("Chunk exhausted, claiming more at %lx...\n", 2419 alloc_bottom); 2420 room = alloc_top - alloc_bottom; 2421 if (room > DEVTREE_CHUNK_SIZE) 2422 room = DEVTREE_CHUNK_SIZE; 2423 if (room < PAGE_SIZE) 2424 prom_panic("No memory for flatten_device_tree " 2425 "(no room)\n"); 2426 chunk = alloc_up(room, 0); 2427 if (chunk == 0) 2428 prom_panic("No memory for flatten_device_tree " 2429 "(claim failed)\n"); 2430 *mem_end = chunk + room; 2431 } 2432 2433 ret = (void *)*mem_start; 2434 *mem_start += needed; 2435 2436 return ret; 2437 } 2438 2439 #define dt_push_token(token, mem_start, mem_end) do { \ 2440 void *room = make_room(mem_start, mem_end, 4, 4); \ 2441 *(__be32 *)room = cpu_to_be32(token); \ 2442 } while(0) 2443 2444 static unsigned long __init dt_find_string(char *str) 2445 { 2446 char *s, *os; 2447 2448 s = os = (char *)dt_string_start; 2449 s += 4; 2450 while (s < (char *)dt_string_end) { 2451 if (prom_strcmp(s, str) == 0) 2452 return s - os; 2453 s += prom_strlen(s) + 1; 2454 } 2455 return 0; 2456 } 2457 2458 /* 2459 * The Open Firmware 1275 specification states properties must be 31 bytes or 2460 * less, however not all firmwares obey this. Make it 64 bytes to be safe. 2461 */ 2462 #define MAX_PROPERTY_NAME 64 2463 2464 static void __init scan_dt_build_strings(phandle node, 2465 unsigned long *mem_start, 2466 unsigned long *mem_end) 2467 { 2468 char *prev_name, *namep, *sstart; 2469 unsigned long soff; 2470 phandle child; 2471 2472 sstart = (char *)dt_string_start; 2473 2474 /* get and store all property names */ 2475 prev_name = ""; 2476 for (;;) { 2477 /* 64 is max len of name including nul. 
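 * MAX_PROPERTY_NAME bytes are reserved up front and the allocation is
 * trimmed back (or unwound entirely) once the real name length is known.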
*/ 2478 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1); 2479 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) { 2480 /* No more nodes: unwind alloc */ 2481 *mem_start = (unsigned long)namep; 2482 break; 2483 } 2484 2485 /* skip "name" */ 2486 if (prom_strcmp(namep, "name") == 0) { 2487 *mem_start = (unsigned long)namep; 2488 prev_name = "name"; 2489 continue; 2490 } 2491 /* get/create string entry */ 2492 soff = dt_find_string(namep); 2493 if (soff != 0) { 2494 *mem_start = (unsigned long)namep; 2495 namep = sstart + soff; 2496 } else { 2497 /* Trim off some if we can */ 2498 *mem_start = (unsigned long)namep + prom_strlen(namep) + 1; 2499 dt_string_end = *mem_start; 2500 } 2501 prev_name = namep; 2502 } 2503 2504 /* do all our children */ 2505 child = call_prom("child", 1, 1, node); 2506 while (child != 0) { 2507 scan_dt_build_strings(child, mem_start, mem_end); 2508 child = call_prom("peer", 1, 1, child); 2509 } 2510 } 2511 2512 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start, 2513 unsigned long *mem_end) 2514 { 2515 phandle child; 2516 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path; 2517 unsigned long soff; 2518 unsigned char *valp; 2519 static char pname[MAX_PROPERTY_NAME] __prombss; 2520 int l, room, has_phandle = 0; 2521 2522 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end); 2523 2524 /* get the node's full name */ 2525 namep = (char *)*mem_start; 2526 room = *mem_end - *mem_start; 2527 if (room > 255) 2528 room = 255; 2529 l = call_prom("package-to-path", 3, 1, node, namep, room); 2530 if (l >= 0) { 2531 /* Didn't fit? Get more room. */ 2532 if (l >= room) { 2533 if (l >= *mem_end - *mem_start) 2534 namep = make_room(mem_start, mem_end, l+1, 1); 2535 call_prom("package-to-path", 3, 1, node, namep, l); 2536 } 2537 namep[l] = '\0'; 2538 2539 /* Fixup an Apple bug where they have bogus \0 chars in the 2540 * middle of the path in some properties, and extract 2541 * the unit name (everything after the last '/'). 
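 * For instance, a path such as "/pci@f2000000/mac-io@17" (an illustrative
 * value, not one read from the tree) is reduced to "mac-io@17" here.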
2542 */ 2543 for (lp = p = namep, ep = namep + l; p < ep; p++) { 2544 if (*p == '/') 2545 lp = namep; 2546 else if (*p != 0) 2547 *lp++ = *p; 2548 } 2549 *lp = 0; 2550 *mem_start = _ALIGN((unsigned long)lp + 1, 4); 2551 } 2552 2553 /* get it again for debugging */ 2554 path = prom_scratch; 2555 memset(path, 0, sizeof(prom_scratch)); 2556 call_prom("package-to-path", 3, 1, node, path, sizeof(prom_scratch) - 1); 2557 2558 /* get and store all properties */ 2559 prev_name = ""; 2560 sstart = (char *)dt_string_start; 2561 for (;;) { 2562 if (call_prom("nextprop", 3, 1, node, prev_name, 2563 pname) != 1) 2564 break; 2565 2566 /* skip "name" */ 2567 if (prom_strcmp(pname, "name") == 0) { 2568 prev_name = "name"; 2569 continue; 2570 } 2571 2572 /* find string offset */ 2573 soff = dt_find_string(pname); 2574 if (soff == 0) { 2575 prom_printf("WARNING: Can't find string index for" 2576 " <%s>, node %s\n", pname, path); 2577 break; 2578 } 2579 prev_name = sstart + soff; 2580 2581 /* get length */ 2582 l = call_prom("getproplen", 2, 1, node, pname); 2583 2584 /* sanity checks */ 2585 if (l == PROM_ERROR) 2586 continue; 2587 2588 /* push property head */ 2589 dt_push_token(OF_DT_PROP, mem_start, mem_end); 2590 dt_push_token(l, mem_start, mem_end); 2591 dt_push_token(soff, mem_start, mem_end); 2592 2593 /* push property content */ 2594 valp = make_room(mem_start, mem_end, l, 4); 2595 call_prom("getprop", 4, 1, node, pname, valp, l); 2596 *mem_start = _ALIGN(*mem_start, 4); 2597 2598 if (!prom_strcmp(pname, "phandle")) 2599 has_phandle = 1; 2600 } 2601 2602 /* Add a "phandle" property if none already exist */ 2603 if (!has_phandle) { 2604 soff = dt_find_string("phandle"); 2605 if (soff == 0) 2606 prom_printf("WARNING: Can't find string index for <phandle> node %s\n", path); 2607 else { 2608 dt_push_token(OF_DT_PROP, mem_start, mem_end); 2609 dt_push_token(4, mem_start, mem_end); 2610 dt_push_token(soff, mem_start, mem_end); 2611 valp = make_room(mem_start, mem_end, 4, 4); 2612 *(__be32 *)valp = cpu_to_be32(node); 2613 } 2614 } 2615 2616 /* do all our children */ 2617 child = call_prom("child", 1, 1, node); 2618 while (child != 0) { 2619 scan_dt_build_struct(child, mem_start, mem_end); 2620 child = call_prom("peer", 1, 1, child); 2621 } 2622 2623 dt_push_token(OF_DT_END_NODE, mem_start, mem_end); 2624 } 2625 2626 static void __init flatten_device_tree(void) 2627 { 2628 phandle root; 2629 unsigned long mem_start, mem_end, room; 2630 struct boot_param_header *hdr; 2631 char *namep; 2632 u64 *rsvmap; 2633 2634 /* 2635 * Check how much room we have between alloc top & bottom (+/- a 2636 * few pages), crop to 1MB, as this is our "chunk" size 2637 */ 2638 room = alloc_top - alloc_bottom - 0x4000; 2639 if (room > DEVTREE_CHUNK_SIZE) 2640 room = DEVTREE_CHUNK_SIZE; 2641 prom_debug("starting device tree allocs at %lx\n", alloc_bottom); 2642 2643 /* Now try to claim that */ 2644 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE); 2645 if (mem_start == 0) 2646 prom_panic("Can't allocate initial device-tree chunk\n"); 2647 mem_end = mem_start + room; 2648 2649 /* Get root of tree */ 2650 root = call_prom("peer", 1, 1, (phandle)0); 2651 if (root == (phandle)0) 2652 prom_panic ("couldn't get device tree root\n"); 2653 2654 /* Build header and make room for mem rsv map */ 2655 mem_start = _ALIGN(mem_start, 4); 2656 hdr = make_room(&mem_start, &mem_end, 2657 sizeof(struct boot_param_header), 4); 2658 dt_header_start = (unsigned long)hdr; 2659 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8); 2660 
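	/*
	 * Rough layout of the blob being assembled here (the offsets are
	 * written into the header once the blocks are complete):
	 *
	 *	boot_param_header
	 *	memory reserve map	(copy of mem_reserve_map)
	 *	strings block		dt_string_start .. dt_string_end
	 *	structure block		dt_struct_start .. dt_struct_end
	 */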
2661 /* Start of strings */ 2662 mem_start = PAGE_ALIGN(mem_start); 2663 dt_string_start = mem_start; 2664 mem_start += 4; /* hole */ 2665 2666 /* Add "phandle" in there, we'll need it */ 2667 namep = make_room(&mem_start, &mem_end, 16, 1); 2668 prom_strcpy(namep, "phandle"); 2669 mem_start = (unsigned long)namep + prom_strlen(namep) + 1; 2670 2671 /* Build string array */ 2672 prom_printf("Building dt strings...\n"); 2673 scan_dt_build_strings(root, &mem_start, &mem_end); 2674 dt_string_end = mem_start; 2675 2676 /* Build structure */ 2677 mem_start = PAGE_ALIGN(mem_start); 2678 dt_struct_start = mem_start; 2679 prom_printf("Building dt structure...\n"); 2680 scan_dt_build_struct(root, &mem_start, &mem_end); 2681 dt_push_token(OF_DT_END, &mem_start, &mem_end); 2682 dt_struct_end = PAGE_ALIGN(mem_start); 2683 2684 /* Finish header */ 2685 hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu); 2686 hdr->magic = cpu_to_be32(OF_DT_HEADER); 2687 hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start); 2688 hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start); 2689 hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start); 2690 hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start); 2691 hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start); 2692 hdr->version = cpu_to_be32(OF_DT_VERSION); 2693 /* Version 16 is not backward compatible */ 2694 hdr->last_comp_version = cpu_to_be32(0x10); 2695 2696 /* Copy the reserve map in */ 2697 memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map)); 2698 2699 #ifdef DEBUG_PROM 2700 { 2701 int i; 2702 prom_printf("reserved memory map:\n"); 2703 for (i = 0; i < mem_reserve_cnt; i++) 2704 prom_printf(" %llx - %llx\n", 2705 be64_to_cpu(mem_reserve_map[i].base), 2706 be64_to_cpu(mem_reserve_map[i].size)); 2707 } 2708 #endif 2709 /* Bump mem_reserve_cnt to cause further reservations to fail 2710 * since it's too late. 2711 */ 2712 mem_reserve_cnt = MEM_RESERVE_MAP_SIZE; 2713 2714 prom_printf("Device tree strings 0x%lx -> 0x%lx\n", 2715 dt_string_start, dt_string_end); 2716 prom_printf("Device tree struct 0x%lx -> 0x%lx\n", 2717 dt_struct_start, dt_struct_end); 2718 } 2719 2720 #ifdef CONFIG_PPC_MAPLE 2721 /* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property. 2722 * The values are bad, and it doesn't even have the right number of cells. 
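 * The fixup below rewrites "ranges" as a full 6-cell entry; rloc supplies
 * the PCI address cell (I/O space, device 4 or 6 depending on which ISA
 * node was found).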
*/ 2723 static void __init fixup_device_tree_maple(void) 2724 { 2725 phandle isa; 2726 u32 rloc = 0x01002000; /* IO space; PCI device = 4 */ 2727 u32 isa_ranges[6]; 2728 char *name; 2729 2730 name = "/ht@0/isa@4"; 2731 isa = call_prom("finddevice", 1, 1, ADDR(name)); 2732 if (!PHANDLE_VALID(isa)) { 2733 name = "/ht@0/isa@6"; 2734 isa = call_prom("finddevice", 1, 1, ADDR(name)); 2735 rloc = 0x01003000; /* IO space; PCI device = 6 */ 2736 } 2737 if (!PHANDLE_VALID(isa)) 2738 return; 2739 2740 if (prom_getproplen(isa, "ranges") != 12) 2741 return; 2742 if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges)) 2743 == PROM_ERROR) 2744 return; 2745 2746 if (isa_ranges[0] != 0x1 || 2747 isa_ranges[1] != 0xf4000000 || 2748 isa_ranges[2] != 0x00010000) 2749 return; 2750 2751 prom_printf("Fixing up bogus ISA range on Maple/Apache...\n"); 2752 2753 isa_ranges[0] = 0x1; 2754 isa_ranges[1] = 0x0; 2755 isa_ranges[2] = rloc; 2756 isa_ranges[3] = 0x0; 2757 isa_ranges[4] = 0x0; 2758 isa_ranges[5] = 0x00010000; 2759 prom_setprop(isa, name, "ranges", 2760 isa_ranges, sizeof(isa_ranges)); 2761 } 2762 2763 #define CPC925_MC_START 0xf8000000 2764 #define CPC925_MC_LENGTH 0x1000000 2765 /* The values for memory-controller don't have right number of cells */ 2766 static void __init fixup_device_tree_maple_memory_controller(void) 2767 { 2768 phandle mc; 2769 u32 mc_reg[4]; 2770 char *name = "/hostbridge@f8000000"; 2771 u32 ac, sc; 2772 2773 mc = call_prom("finddevice", 1, 1, ADDR(name)); 2774 if (!PHANDLE_VALID(mc)) 2775 return; 2776 2777 if (prom_getproplen(mc, "reg") != 8) 2778 return; 2779 2780 prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac)); 2781 prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc)); 2782 if ((ac != 2) || (sc != 2)) 2783 return; 2784 2785 if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR) 2786 return; 2787 2788 if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH) 2789 return; 2790 2791 prom_printf("Fixing up bogus hostbridge on Maple...\n"); 2792 2793 mc_reg[0] = 0x0; 2794 mc_reg[1] = CPC925_MC_START; 2795 mc_reg[2] = 0x0; 2796 mc_reg[3] = CPC925_MC_LENGTH; 2797 prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg)); 2798 } 2799 #else 2800 #define fixup_device_tree_maple() 2801 #define fixup_device_tree_maple_memory_controller() 2802 #endif 2803 2804 #ifdef CONFIG_PPC_CHRP 2805 /* 2806 * Pegasos and BriQ lacks the "ranges" property in the isa node 2807 * Pegasos needs decimal IRQ 14/15, not hexadecimal 2808 * Pegasos has the IDE configured in legacy mode, but advertised as native 2809 */ 2810 static void __init fixup_device_tree_chrp(void) 2811 { 2812 phandle ph; 2813 u32 prop[6]; 2814 u32 rloc = 0x01006000; /* IO space; PCI device = 12 */ 2815 char *name; 2816 int rc; 2817 2818 name = "/pci@80000000/isa@c"; 2819 ph = call_prom("finddevice", 1, 1, ADDR(name)); 2820 if (!PHANDLE_VALID(ph)) { 2821 name = "/pci@ff500000/isa@6"; 2822 ph = call_prom("finddevice", 1, 1, ADDR(name)); 2823 rloc = 0x01003000; /* IO space; PCI device = 6 */ 2824 } 2825 if (PHANDLE_VALID(ph)) { 2826 rc = prom_getproplen(ph, "ranges"); 2827 if (rc == 0 || rc == PROM_ERROR) { 2828 prom_printf("Fixing up missing ISA range on Pegasos...\n"); 2829 2830 prop[0] = 0x1; 2831 prop[1] = 0x0; 2832 prop[2] = rloc; 2833 prop[3] = 0x0; 2834 prop[4] = 0x0; 2835 prop[5] = 0x00010000; 2836 prom_setprop(ph, name, "ranges", prop, sizeof(prop)); 2837 } 2838 } 2839 2840 name = "/pci@80000000/ide@C,1"; 2841 ph = call_prom("finddevice", 1, 1, ADDR(name)); 2842 if (PHANDLE_VALID(ph)) { 2843 
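		/* Put the IDE back into legacy mode: force decimal IRQ 14 and
		 * clear the native-mode bits (0x5) from class-code, per the
		 * comment at the top of this function. */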
prom_printf("Fixing up IDE interrupt on Pegasos...\n"); 2844 prop[0] = 14; 2845 prop[1] = 0x0; 2846 prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32)); 2847 prom_printf("Fixing up IDE class-code on Pegasos...\n"); 2848 rc = prom_getprop(ph, "class-code", prop, sizeof(u32)); 2849 if (rc == sizeof(u32)) { 2850 prop[0] &= ~0x5; 2851 prom_setprop(ph, name, "class-code", prop, sizeof(u32)); 2852 } 2853 } 2854 } 2855 #else 2856 #define fixup_device_tree_chrp() 2857 #endif 2858 2859 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC) 2860 static void __init fixup_device_tree_pmac(void) 2861 { 2862 phandle u3, i2c, mpic; 2863 u32 u3_rev; 2864 u32 interrupts[2]; 2865 u32 parent; 2866 2867 /* Some G5s have a missing interrupt definition, fix it up here */ 2868 u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000")); 2869 if (!PHANDLE_VALID(u3)) 2870 return; 2871 i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000")); 2872 if (!PHANDLE_VALID(i2c)) 2873 return; 2874 mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000")); 2875 if (!PHANDLE_VALID(mpic)) 2876 return; 2877 2878 /* check if proper rev of u3 */ 2879 if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev)) 2880 == PROM_ERROR) 2881 return; 2882 if (u3_rev < 0x35 || u3_rev > 0x39) 2883 return; 2884 /* does it need fixup ? */ 2885 if (prom_getproplen(i2c, "interrupts") > 0) 2886 return; 2887 2888 prom_printf("fixing up bogus interrupts for u3 i2c...\n"); 2889 2890 /* interrupt on this revision of u3 is number 0 and level */ 2891 interrupts[0] = 0; 2892 interrupts[1] = 1; 2893 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts", 2894 &interrupts, sizeof(interrupts)); 2895 parent = (u32)mpic; 2896 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent", 2897 &parent, sizeof(parent)); 2898 } 2899 #else 2900 #define fixup_device_tree_pmac() 2901 #endif 2902 2903 #ifdef CONFIG_PPC_EFIKA 2904 /* 2905 * The MPC5200 FEC driver requires an phy-handle property to tell it how 2906 * to talk to the phy. If the phy-handle property is missing, then this 2907 * function is called to add the appropriate nodes and link it to the 2908 * ethernet node. 2909 */ 2910 static void __init fixup_device_tree_efika_add_phy(void) 2911 { 2912 u32 node; 2913 char prop[64]; 2914 int rv; 2915 2916 /* Check if /builtin/ethernet exists - bail if it doesn't */ 2917 node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet")); 2918 if (!PHANDLE_VALID(node)) 2919 return; 2920 2921 /* Check if the phy-handle property exists - bail if it does */ 2922 rv = prom_getprop(node, "phy-handle", prop, sizeof(prop)); 2923 if (!rv) 2924 return; 2925 2926 /* 2927 * At this point the ethernet device doesn't have a phy described. 
2928 * Now we need to add the missing phy node and linkage 2929 */ 2930 2931 /* Check for an MDIO bus node - if missing then create one */ 2932 node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio")); 2933 if (!PHANDLE_VALID(node)) { 2934 prom_printf("Adding Ethernet MDIO node\n"); 2935 call_prom("interpret", 1, 1, 2936 " s\" /builtin\" find-device" 2937 " new-device" 2938 " 1 encode-int s\" #address-cells\" property" 2939 " 0 encode-int s\" #size-cells\" property" 2940 " s\" mdio\" device-name" 2941 " s\" fsl,mpc5200b-mdio\" encode-string" 2942 " s\" compatible\" property" 2943 " 0xf0003000 0x400 reg" 2944 " 0x2 encode-int" 2945 " 0x5 encode-int encode+" 2946 " 0x3 encode-int encode+" 2947 " s\" interrupts\" property" 2948 " finish-device"); 2949 }; 2950 2951 /* Check for a PHY device node - if missing then create one and 2952 * give it's phandle to the ethernet node */ 2953 node = call_prom("finddevice", 1, 1, 2954 ADDR("/builtin/mdio/ethernet-phy")); 2955 if (!PHANDLE_VALID(node)) { 2956 prom_printf("Adding Ethernet PHY node\n"); 2957 call_prom("interpret", 1, 1, 2958 " s\" /builtin/mdio\" find-device" 2959 " new-device" 2960 " s\" ethernet-phy\" device-name" 2961 " 0x10 encode-int s\" reg\" property" 2962 " my-self" 2963 " ihandle>phandle" 2964 " finish-device" 2965 " s\" /builtin/ethernet\" find-device" 2966 " encode-int" 2967 " s\" phy-handle\" property" 2968 " device-end"); 2969 } 2970 } 2971 2972 static void __init fixup_device_tree_efika(void) 2973 { 2974 int sound_irq[3] = { 2, 2, 0 }; 2975 int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0, 2976 3,4,0, 3,5,0, 3,6,0, 3,7,0, 2977 3,8,0, 3,9,0, 3,10,0, 3,11,0, 2978 3,12,0, 3,13,0, 3,14,0, 3,15,0 }; 2979 u32 node; 2980 char prop[64]; 2981 int rv, len; 2982 2983 /* Check if we're really running on a EFIKA */ 2984 node = call_prom("finddevice", 1, 1, ADDR("/")); 2985 if (!PHANDLE_VALID(node)) 2986 return; 2987 2988 rv = prom_getprop(node, "model", prop, sizeof(prop)); 2989 if (rv == PROM_ERROR) 2990 return; 2991 if (prom_strcmp(prop, "EFIKA5K2")) 2992 return; 2993 2994 prom_printf("Applying EFIKA device tree fixups\n"); 2995 2996 /* Claiming to be 'chrp' is death */ 2997 node = call_prom("finddevice", 1, 1, ADDR("/")); 2998 rv = prom_getprop(node, "device_type", prop, sizeof(prop)); 2999 if (rv != PROM_ERROR && (prom_strcmp(prop, "chrp") == 0)) 3000 prom_setprop(node, "/", "device_type", "efika", sizeof("efika")); 3001 3002 /* CODEGEN,description is exposed in /proc/cpuinfo so 3003 fix that too */ 3004 rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop)); 3005 if (rv != PROM_ERROR && (prom_strstr(prop, "CHRP"))) 3006 prom_setprop(node, "/", "CODEGEN,description", 3007 "Efika 5200B PowerPC System", 3008 sizeof("Efika 5200B PowerPC System")); 3009 3010 /* Fixup bestcomm interrupts property */ 3011 node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm")); 3012 if (PHANDLE_VALID(node)) { 3013 len = prom_getproplen(node, "interrupts"); 3014 if (len == 12) { 3015 prom_printf("Fixing bestcomm interrupts property\n"); 3016 prom_setprop(node, "/builtin/bestcom", "interrupts", 3017 bcomm_irq, sizeof(bcomm_irq)); 3018 } 3019 } 3020 3021 /* Fixup sound interrupts property */ 3022 node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound")); 3023 if (PHANDLE_VALID(node)) { 3024 rv = prom_getprop(node, "interrupts", prop, sizeof(prop)); 3025 if (rv == PROM_ERROR) { 3026 prom_printf("Adding sound interrupts property\n"); 3027 prom_setprop(node, "/builtin/sound", "interrupts", 3028 sound_irq, sizeof(sound_irq)); 3029 } 3030 } 
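	/* The bestcomm/sound fixups above only patch existing nodes; the PHY
	 * fixup below may have to create nodes from scratch via "interpret". */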
3031 3032 /* Make sure ethernet phy-handle property exists */ 3033 fixup_device_tree_efika_add_phy(); 3034 } 3035 #else 3036 #define fixup_device_tree_efika() 3037 #endif 3038 3039 #ifdef CONFIG_PPC_PASEMI_NEMO 3040 /* 3041 * CFE supplied on Nemo is broken in several ways, biggest 3042 * problem is that it reassigns ISA interrupts to unused mpic ints. 3043 * Add an interrupt-controller property for the io-bridge to use 3044 * and correct the ints so we can attach them to an irq_domain 3045 */ 3046 static void __init fixup_device_tree_pasemi(void) 3047 { 3048 u32 interrupts[2], parent, rval, val = 0; 3049 char *name, *pci_name; 3050 phandle iob, node; 3051 3052 /* Find the root pci node */ 3053 name = "/pxp@0,e0000000"; 3054 iob = call_prom("finddevice", 1, 1, ADDR(name)); 3055 if (!PHANDLE_VALID(iob)) 3056 return; 3057 3058 /* check if interrupt-controller node set yet */ 3059 if (prom_getproplen(iob, "interrupt-controller") !=PROM_ERROR) 3060 return; 3061 3062 prom_printf("adding interrupt-controller property for SB600...\n"); 3063 3064 prom_setprop(iob, name, "interrupt-controller", &val, 0); 3065 3066 pci_name = "/pxp@0,e0000000/pci@11"; 3067 node = call_prom("finddevice", 1, 1, ADDR(pci_name)); 3068 parent = ADDR(iob); 3069 3070 for( ; prom_next_node(&node); ) { 3071 /* scan each node for one with an interrupt */ 3072 if (!PHANDLE_VALID(node)) 3073 continue; 3074 3075 rval = prom_getproplen(node, "interrupts"); 3076 if (rval == 0 || rval == PROM_ERROR) 3077 continue; 3078 3079 prom_getprop(node, "interrupts", &interrupts, sizeof(interrupts)); 3080 if ((interrupts[0] < 212) || (interrupts[0] > 222)) 3081 continue; 3082 3083 /* found a node, update both interrupts and interrupt-parent */ 3084 if ((interrupts[0] >= 212) && (interrupts[0] <= 215)) 3085 interrupts[0] -= 203; 3086 if ((interrupts[0] >= 216) && (interrupts[0] <= 220)) 3087 interrupts[0] -= 213; 3088 if (interrupts[0] == 221) 3089 interrupts[0] = 14; 3090 if (interrupts[0] == 222) 3091 interrupts[0] = 8; 3092 3093 prom_setprop(node, pci_name, "interrupts", interrupts, 3094 sizeof(interrupts)); 3095 prom_setprop(node, pci_name, "interrupt-parent", &parent, 3096 sizeof(parent)); 3097 } 3098 3099 /* 3100 * The io-bridge has device_type set to 'io-bridge' change it to 'isa' 3101 * so that generic isa-bridge code can add the SB600 and its on-board 3102 * peripherals. 3103 */ 3104 name = "/pxp@0,e0000000/io-bridge@0"; 3105 iob = call_prom("finddevice", 1, 1, ADDR(name)); 3106 if (!PHANDLE_VALID(iob)) 3107 return; 3108 3109 /* device_type is already set, just change it. 
*/ 3110 3111 prom_printf("Changing device_type of SB600 node...\n"); 3112 3113 prom_setprop(iob, name, "device_type", "isa", sizeof("isa")); 3114 } 3115 #else /* !CONFIG_PPC_PASEMI_NEMO */ 3116 static inline void fixup_device_tree_pasemi(void) { } 3117 #endif 3118 3119 static void __init fixup_device_tree(void) 3120 { 3121 fixup_device_tree_maple(); 3122 fixup_device_tree_maple_memory_controller(); 3123 fixup_device_tree_chrp(); 3124 fixup_device_tree_pmac(); 3125 fixup_device_tree_efika(); 3126 fixup_device_tree_pasemi(); 3127 } 3128 3129 static void __init prom_find_boot_cpu(void) 3130 { 3131 __be32 rval; 3132 ihandle prom_cpu; 3133 phandle cpu_pkg; 3134 3135 rval = 0; 3136 if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0) 3137 return; 3138 prom_cpu = be32_to_cpu(rval); 3139 3140 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu); 3141 3142 if (!PHANDLE_VALID(cpu_pkg)) 3143 return; 3144 3145 prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval)); 3146 prom.cpu = be32_to_cpu(rval); 3147 3148 prom_debug("Booting CPU hw index = %d\n", prom.cpu); 3149 } 3150 3151 static void __init prom_check_initrd(unsigned long r3, unsigned long r4) 3152 { 3153 #ifdef CONFIG_BLK_DEV_INITRD 3154 if (r3 && r4 && r4 != 0xdeadbeef) { 3155 __be64 val; 3156 3157 prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3; 3158 prom_initrd_end = prom_initrd_start + r4; 3159 3160 val = cpu_to_be64(prom_initrd_start); 3161 prom_setprop(prom.chosen, "/chosen", "linux,initrd-start", 3162 &val, sizeof(val)); 3163 val = cpu_to_be64(prom_initrd_end); 3164 prom_setprop(prom.chosen, "/chosen", "linux,initrd-end", 3165 &val, sizeof(val)); 3166 3167 reserve_mem(prom_initrd_start, 3168 prom_initrd_end - prom_initrd_start); 3169 3170 prom_debug("initrd_start=0x%lx\n", prom_initrd_start); 3171 prom_debug("initrd_end=0x%lx\n", prom_initrd_end); 3172 } 3173 #endif /* CONFIG_BLK_DEV_INITRD */ 3174 } 3175 3176 #ifdef CONFIG_PPC64 3177 #ifdef CONFIG_RELOCATABLE 3178 static void reloc_toc(void) 3179 { 3180 } 3181 3182 static void unreloc_toc(void) 3183 { 3184 } 3185 #else 3186 static void __reloc_toc(unsigned long offset, unsigned long nr_entries) 3187 { 3188 unsigned long i; 3189 unsigned long *toc_entry; 3190 3191 /* Get the start of the TOC by using r2 directly. */ 3192 asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry)); 3193 3194 for (i = 0; i < nr_entries; i++) { 3195 *toc_entry = *toc_entry + offset; 3196 toc_entry++; 3197 } 3198 } 3199 3200 static void reloc_toc(void) 3201 { 3202 unsigned long offset = reloc_offset(); 3203 unsigned long nr_entries = 3204 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long); 3205 3206 __reloc_toc(offset, nr_entries); 3207 3208 mb(); 3209 } 3210 3211 static void unreloc_toc(void) 3212 { 3213 unsigned long offset = reloc_offset(); 3214 unsigned long nr_entries = 3215 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long); 3216 3217 mb(); 3218 3219 __reloc_toc(-offset, nr_entries); 3220 } 3221 #endif 3222 #endif 3223 3224 #ifdef CONFIG_PPC_SVM 3225 /* 3226 * Perform the Enter Secure Mode ultracall. 3227 */ 3228 static int enter_secure_mode(unsigned long kbase, unsigned long fdt) 3229 { 3230 register unsigned long r3 asm("r3") = UV_ESM; 3231 register unsigned long r4 asm("r4") = kbase; 3232 register unsigned long r5 asm("r5") = fdt; 3233 3234 asm volatile("sc 2" : "+r"(r3) : "r"(r4), "r"(r5)); 3235 3236 return r3; 3237 } 3238 3239 /* 3240 * Call the Ultravisor to transfer us to secure memory if we have an ESM blob. 
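 * setup_secure_guest() below is a no-op unless prom_svm_enable was set
 * (presumably by the earlier command-line parsing); if the ultracall fails,
 * the boot is terminated through prom_rtas_os_term().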
3241 */ 3242 static void setup_secure_guest(unsigned long kbase, unsigned long fdt) 3243 { 3244 int ret; 3245 3246 if (!prom_svm_enable) 3247 return; 3248 3249 /* Switch to secure mode. */ 3250 prom_printf("Switching to secure mode.\n"); 3251 3252 ret = enter_secure_mode(kbase, fdt); 3253 if (ret != U_SUCCESS) { 3254 prom_printf("Returned %d from switching to secure mode.\n", ret); 3255 prom_rtas_os_term("Switch to secure mode failed.\n"); 3256 } 3257 } 3258 #else 3259 static void setup_secure_guest(unsigned long kbase, unsigned long fdt) 3260 { 3261 } 3262 #endif /* CONFIG_PPC_SVM */ 3263 3264 /* 3265 * We enter here early on, when the Open Firmware prom is still 3266 * handling exceptions and the MMU hash table for us. 3267 */ 3268 3269 unsigned long __init prom_init(unsigned long r3, unsigned long r4, 3270 unsigned long pp, 3271 unsigned long r6, unsigned long r7, 3272 unsigned long kbase) 3273 { 3274 unsigned long hdr; 3275 3276 #ifdef CONFIG_PPC32 3277 unsigned long offset = reloc_offset(); 3278 reloc_got2(offset); 3279 #else 3280 reloc_toc(); 3281 #endif 3282 3283 /* 3284 * First zero the BSS 3285 */ 3286 memset(&__bss_start, 0, __bss_stop - __bss_start); 3287 3288 /* 3289 * Init interface to Open Firmware, get some node references, 3290 * like /chosen 3291 */ 3292 prom_init_client_services(pp); 3293 3294 /* 3295 * See if this OF is old enough that we need to do explicit maps 3296 * and other workarounds 3297 */ 3298 prom_find_mmu(); 3299 3300 /* 3301 * Init prom stdout device 3302 */ 3303 prom_init_stdout(); 3304 3305 prom_printf("Preparing to boot %s", linux_banner); 3306 3307 /* 3308 * Get default machine type. At this point, we do not differentiate 3309 * between pSeries SMP and pSeries LPAR 3310 */ 3311 of_platform = prom_find_machine_type(); 3312 prom_printf("Detected machine type: %x\n", of_platform); 3313 3314 #ifndef CONFIG_NONSTATIC_KERNEL 3315 /* Bail if this is a kdump kernel. */ 3316 if (PHYSICAL_START > 0) 3317 prom_panic("Error: You can't boot a kdump kernel from OF!\n"); 3318 #endif 3319 3320 /* 3321 * Check for an initrd 3322 */ 3323 prom_check_initrd(r3, r4); 3324 3325 /* 3326 * Do early parsing of command line 3327 */ 3328 early_cmdline_parse(); 3329 3330 #ifdef CONFIG_PPC_PSERIES 3331 /* 3332 * On pSeries, inform the firmware about our capabilities 3333 */ 3334 if (of_platform == PLATFORM_PSERIES || 3335 of_platform == PLATFORM_PSERIES_LPAR) 3336 prom_send_capabilities(); 3337 #endif 3338 3339 /* 3340 * Copy the CPU hold code 3341 */ 3342 if (of_platform != PLATFORM_POWERMAC) 3343 copy_and_flush(0, kbase, 0x100, 0); 3344 3345 /* 3346 * Initialize memory management within prom_init 3347 */ 3348 prom_init_mem(); 3349 3350 /* 3351 * Determine which cpu is actually running right _now_ 3352 */ 3353 prom_find_boot_cpu(); 3354 3355 /* 3356 * Initialize display devices 3357 */ 3358 prom_check_displays(); 3359 3360 #if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__) 3361 /* 3362 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else 3363 * that uses the allocator, we need to make sure we get the top of memory 3364 * available for us here... 3365 */ 3366 if (of_platform == PLATFORM_PSERIES) 3367 prom_initialize_tce_table(); 3368 #endif 3369 3370 /* 3371 * On non-powermacs, try to instantiate RTAS. PowerMacs don't 3372 * have a usable RTAS implementation. 
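 * The same PLATFORM_POWERMAC test also guards prom_hold_cpus() and
 * prom_close_stdin() further down.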
3373 */ 3374 if (of_platform != PLATFORM_POWERMAC) 3375 prom_instantiate_rtas(); 3376 3377 #ifdef CONFIG_PPC64 3378 /* instantiate sml */ 3379 prom_instantiate_sml(); 3380 #endif 3381 3382 /* 3383 * On non-powermacs, put all CPUs in spin-loops. 3384 * 3385 * PowerMacs use a different mechanism to spin CPUs 3386 * 3387 * (This must be done after instanciating RTAS) 3388 */ 3389 if (of_platform != PLATFORM_POWERMAC) 3390 prom_hold_cpus(); 3391 3392 /* 3393 * Fill in some infos for use by the kernel later on 3394 */ 3395 if (prom_memory_limit) { 3396 __be64 val = cpu_to_be64(prom_memory_limit); 3397 prom_setprop(prom.chosen, "/chosen", "linux,memory-limit", 3398 &val, sizeof(val)); 3399 } 3400 #ifdef CONFIG_PPC64 3401 if (prom_iommu_off) 3402 prom_setprop(prom.chosen, "/chosen", "linux,iommu-off", 3403 NULL, 0); 3404 3405 if (prom_iommu_force_on) 3406 prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on", 3407 NULL, 0); 3408 3409 if (prom_tce_alloc_start) { 3410 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start", 3411 &prom_tce_alloc_start, 3412 sizeof(prom_tce_alloc_start)); 3413 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end", 3414 &prom_tce_alloc_end, 3415 sizeof(prom_tce_alloc_end)); 3416 } 3417 #endif 3418 3419 /* 3420 * Fixup any known bugs in the device-tree 3421 */ 3422 fixup_device_tree(); 3423 3424 /* 3425 * Now finally create the flattened device-tree 3426 */ 3427 prom_printf("copying OF device tree...\n"); 3428 flatten_device_tree(); 3429 3430 /* 3431 * in case stdin is USB and still active on IBM machines... 3432 * Unfortunately quiesce crashes on some powermacs if we have 3433 * closed stdin already (in particular the powerbook 101). 3434 */ 3435 if (of_platform != PLATFORM_POWERMAC) 3436 prom_close_stdin(); 3437 3438 /* 3439 * Call OF "quiesce" method to shut down pending DMA's from 3440 * devices etc... 3441 */ 3442 prom_printf("Quiescing Open Firmware ...\n"); 3443 call_prom("quiesce", 0, 0); 3444 3445 /* 3446 * And finally, call the kernel passing it the flattened device 3447 * tree and NULL as r5, thus triggering the new entry point which 3448 * is common to us and kexec 3449 */ 3450 hdr = dt_header_start; 3451 3452 /* Don't print anything after quiesce under OPAL, it crashes OFW */ 3453 prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase); 3454 prom_debug("->dt_header_start=0x%lx\n", hdr); 3455 3456 #ifdef CONFIG_PPC32 3457 reloc_got2(-offset); 3458 #else 3459 unreloc_toc(); 3460 #endif 3461 3462 /* Move to secure memory if we're supposed to be secure guests. */ 3463 setup_secure_guest(kbase, hdr); 3464 3465 __start(hdr, kbase, 0, 0, 0, 0, 0); 3466 3467 return 0; 3468 } 3469