// SPDX-License-Identifier: GPL-2.0
/*
 * Functions for working with the Flattened Device Tree data format
 *
 * Copyright 2009 Benjamin Herrenschmidt, IBM Corp
 * benh@kernel.crashing.org
 */

#define pr_fmt(fmt)	"OF: fdt: " fmt

#include <linux/acpi.h>
#include <linux/crash_dump.h>
#include <linux/crc32.h>
#include <linux/kernel.h>
#include <linux/initrd.h>
#include <linux/memblock.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/libfdt.h>
#include <linux/debugfs.h>
#include <linux/serial_core.h>
#include <linux/sysfs.h>
#include <linux/random.h>

#include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
#include <asm/page.h>

#include "of_private.h"

/*
 * __dtb_empty_root_begin[] and __dtb_empty_root_end[] magically created by
 * cmd_wrap_S_dtb in scripts/Makefile.dtbs
 */
extern uint8_t __dtb_empty_root_begin[];
extern uint8_t __dtb_empty_root_end[];

/*
 * of_fdt_limit_memory - limit the number of regions in the /memory node
 * @limit: maximum entries
 *
 * Adjust the flattened device tree to have at most 'limit' number of
 * memory entries in the /memory node. This function may be called
 * any time after initial_boot_params is set.
 */
void __init of_fdt_limit_memory(int limit)
{
	int memory;
	int len;
	const void *val;
	int cell_size = sizeof(uint32_t)*(dt_root_addr_cells + dt_root_size_cells);

	memory = fdt_path_offset(initial_boot_params, "/memory");
	if (memory > 0) {
		val = fdt_getprop(initial_boot_params, memory, "reg", &len);
		if (len > limit*cell_size) {
			len = limit*cell_size;
			pr_debug("Limiting number of entries to %d\n", limit);
			fdt_setprop(initial_boot_params, memory, "reg", val,
				    len);
		}
	}
}

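/*
 * Worked example for the trimming above (values are illustrative, not taken
 * from any particular platform): with dt_root_addr_cells == 2 and
 * dt_root_size_cells == 2, each /memory "reg" entry occupies
 * sizeof(uint32_t) * (2 + 2) == 16 bytes, so of_fdt_limit_memory(4) caps the
 * property length at 64 bytes, keeping the first four base/size pairs and
 * dropping the rest.
 */
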
bool of_fdt_device_is_available(const void *blob, unsigned long node)
{
	const char *status = fdt_getprop(blob, node, "status", NULL);

	if (!status)
		return true;

	if (!strcmp(status, "ok") || !strcmp(status, "okay"))
		return true;

	return false;
}

static void *unflatten_dt_alloc(void **mem, unsigned long size,
				unsigned long align)
{
	void *res;

	*mem = PTR_ALIGN(*mem, align);
	res = *mem;
	*mem += size;

	return res;
}

static void populate_properties(const void *blob,
				int offset,
				void **mem,
				struct device_node *np,
				const char *nodename,
				bool dryrun)
{
	struct property *pp, **pprev = NULL;
	int cur;
	bool has_name = false;

	pprev = &np->properties;
	for (cur = fdt_first_property_offset(blob, offset);
	     cur >= 0;
	     cur = fdt_next_property_offset(blob, cur)) {
		const __be32 *val;
		const char *pname;
		u32 sz;

		val = fdt_getprop_by_offset(blob, cur, &pname, &sz);
		if (!val) {
			pr_warn("Cannot locate property at 0x%x\n", cur);
			continue;
		}

		if (!pname) {
			pr_warn("Cannot find property name at 0x%x\n", cur);
			continue;
		}

		if (!strcmp(pname, "name"))
			has_name = true;

		pp = unflatten_dt_alloc(mem, sizeof(struct property),
					__alignof__(struct property));
		if (dryrun)
			continue;

		/* We accept flattened tree phandles either in
		 * ePAPR-style "phandle" properties, or the
		 * legacy "linux,phandle" properties. If both
		 * appear and have different values, things
		 * will get weird. Don't do that.
		 */
		if (!strcmp(pname, "phandle") ||
		    !strcmp(pname, "linux,phandle")) {
			if (!np->phandle)
				np->phandle = be32_to_cpup(val);
		}

		/* And we process the "ibm,phandle" property
		 * used in pSeries dynamic device tree
		 * stuff
		 */
		if (!strcmp(pname, "ibm,phandle"))
			np->phandle = be32_to_cpup(val);

		pp->name = (char *)pname;
		pp->length = sz;
		pp->value = (__be32 *)val;
		*pprev = pp;
		pprev = &pp->next;
	}

	/* With version 0x10 we may not have the name property,
	 * recreate it here from the unit name if absent
	 */
	if (!has_name) {
		const char *p = nodename, *ps = p, *pa = NULL;
		int len;

		while (*p) {
			if ((*p) == '@')
				pa = p;
			else if ((*p) == '/')
				ps = p + 1;
			p++;
		}

		if (pa < ps)
			pa = p;
		len = (pa - ps) + 1;
		pp = unflatten_dt_alloc(mem, sizeof(struct property) + len,
					__alignof__(struct property));
		if (!dryrun) {
			pp->name = "name";
			pp->length = len;
			pp->value = pp + 1;
			*pprev = pp;
			memcpy(pp->value, ps, len - 1);
			((char *)pp->value)[len - 1] = 0;
			pr_debug("fixed up name for %s -> %s\n",
				 nodename, (char *)pp->value);
		}
	}
}

static int populate_node(const void *blob,
			 int offset,
			 void **mem,
			 struct device_node *dad,
			 struct device_node **pnp,
			 bool dryrun)
{
	struct device_node *np;
	const char *pathp;
	int len;

	pathp = fdt_get_name(blob, offset, &len);
	if (!pathp) {
		*pnp = NULL;
		return len;
	}

	len++;

	np = unflatten_dt_alloc(mem, sizeof(struct device_node) + len,
				__alignof__(struct device_node));
	if (!dryrun) {
		char *fn;
		of_node_init(np);
		np->full_name = fn = ((char *)np) + sizeof(*np);

		memcpy(fn, pathp, len);

		if (dad != NULL) {
			np->parent = dad;
			np->sibling = dad->child;
			dad->child = np;
		}
	}

	populate_properties(blob, offset, mem, np, pathp, dryrun);
	if (!dryrun) {
		np->name = of_get_property(np, "name", NULL);
		if (!np->name)
			np->name = "<NULL>";
	}

	*pnp = np;
	return 0;
}

static void reverse_nodes(struct device_node *parent)
{
	struct device_node *child, *next;

	/* In-depth first */
	child = parent->child;
	while (child) {
		reverse_nodes(child);

		child = child->sibling;
	}

	/* Reverse the nodes in the child list */
	child = parent->child;
	parent->child = NULL;
	while (child) {
		next = child->sibling;

		child->sibling = parent->child;
		parent->child = child;
		child = next;
	}
}

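/*
 * Note on ordering (illustrative only): populate_node() links each new node
 * at the head of its parent's child list, so after the unflattening loop a
 * parent with children A, B, C in .dts order holds them as C -> B -> A.
 * reverse_nodes() then reverses every child list in place, restoring
 * A -> B -> C.
 */
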
/**
 * unflatten_dt_nodes - Alloc and populate a device_node from the flat tree
 * @blob: The parent device tree blob
 * @mem: Memory chunk to use for allocating device nodes and properties
 * @dad: Parent struct device_node
 * @nodepp: The device_node tree created by the call
 *
 * Return: The size of unflattened device tree or error code
 */
static int unflatten_dt_nodes(const void *blob,
			      void *mem,
			      struct device_node *dad,
			      struct device_node **nodepp)
{
	struct device_node *root;
	int offset = 0, depth = 0, initial_depth = 0;
#define FDT_MAX_DEPTH	64
	struct device_node *nps[FDT_MAX_DEPTH];
	void *base = mem;
	bool dryrun = !base;
	int ret;

	if (nodepp)
		*nodepp = NULL;

	/*
	 * We're unflattening a device sub-tree if @dad is valid. There are
	 * possibly multiple nodes in the first level of depth. We need to
	 * set @depth to 1 to make fdt_next_node() happy, as it bails out
	 * immediately when a negative @depth is found. Otherwise, the device
	 * nodes except the first one won't be unflattened successfully.
	 */
	if (dad)
		depth = initial_depth = 1;

	root = dad;
	nps[depth] = dad;

	for (offset = 0;
	     offset >= 0 && depth >= initial_depth;
	     offset = fdt_next_node(blob, offset, &depth)) {
		if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH - 1))
			continue;

		if (!IS_ENABLED(CONFIG_OF_KOBJ) &&
		    !of_fdt_device_is_available(blob, offset))
			continue;

		ret = populate_node(blob, offset, &mem, nps[depth],
				    &nps[depth+1], dryrun);
		if (ret < 0)
			return ret;

		if (!dryrun && nodepp && !*nodepp)
			*nodepp = nps[depth+1];
		if (!dryrun && !root)
			root = nps[depth+1];
	}

	if (offset < 0 && offset != -FDT_ERR_NOTFOUND) {
		pr_err("Error %d processing FDT\n", offset);
		return -EINVAL;
	}

	/*
	 * Reverse the child list. Some drivers assume node order matches .dts
	 * node order.
	 */
	if (!dryrun)
		reverse_nodes(root);

	return mem - base;
}

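/*
 * The function above is always driven in two passes; a minimal sketch of the
 * protocol (variable names are illustrative only):
 *
 *	size = unflatten_dt_nodes(blob, NULL, dad, NULL);	// dry run: measure
 *	mem  = dt_alloc(size + 4, __alignof__(struct device_node));
 *	unflatten_dt_nodes(blob, mem, dad, &root);		// populate for real
 *
 * __unflatten_device_tree() below is the canonical caller of this pattern.
 */
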
/**
 * __unflatten_device_tree - create tree of device_nodes from flat blob
 * @blob: The blob to expand
 * @dad: Parent device node
 * @mynodes: The device_node tree created by the call
 * @dt_alloc: An allocator that provides a virtual address to memory
 * for the resulting tree
 * @detached: if true set OF_DETACHED on @mynodes
 *
 * Unflattens a device-tree, creating the tree of struct device_node. It also
 * fills the "name" and "type" pointers of the nodes so the normal device-tree
 * walking functions can be used.
 *
 * Return: NULL on failure or the memory chunk containing the unflattened
 * device tree on success.
 */
void *__unflatten_device_tree(const void *blob,
			      struct device_node *dad,
			      struct device_node **mynodes,
			      void *(*dt_alloc)(u64 size, u64 align),
			      bool detached)
{
	int size;
	void *mem;
	int ret;

	if (mynodes)
		*mynodes = NULL;

	pr_debug(" -> unflatten_device_tree()\n");

	if (!blob) {
		pr_debug("No device tree pointer\n");
		return NULL;
	}

	pr_debug("Unflattening device tree:\n");
	pr_debug("magic: %08x\n", fdt_magic(blob));
	pr_debug("size: %08x\n", fdt_totalsize(blob));
	pr_debug("version: %08x\n", fdt_version(blob));

	if (fdt_check_header(blob)) {
		pr_err("Invalid device tree blob header\n");
		return NULL;
	}

	/* First pass, scan for size */
	size = unflatten_dt_nodes(blob, NULL, dad, NULL);
	if (size <= 0)
		return NULL;

	size = ALIGN(size, 4);
	pr_debug(" size is %d, allocating...\n", size);

	/* Allocate memory for the expanded device tree */
	mem = dt_alloc(size + 4, __alignof__(struct device_node));
	if (!mem)
		return NULL;

	memset(mem, 0, size);

	*(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef);

	pr_debug(" unflattening %p...\n", mem);

	/* Second pass, do actual unflattening */
	ret = unflatten_dt_nodes(blob, mem, dad, mynodes);

	if (be32_to_cpup(mem + size) != 0xdeadbeef)
		pr_warn("End of tree marker overwritten: %08x\n",
			be32_to_cpup(mem + size));

	if (ret <= 0)
		return NULL;

	if (detached && mynodes && *mynodes) {
		of_node_set_flag(*mynodes, OF_DETACHED);
		pr_debug("unflattened tree is detached\n");
	}

	pr_debug(" <- unflatten_device_tree()\n");
	return mem;
}

static void *kernel_tree_alloc(u64 size, u64 align)
{
	return kzalloc(size, GFP_KERNEL);
}

static DEFINE_MUTEX(of_fdt_unflatten_mutex);

/**
 * of_fdt_unflatten_tree - create tree of device_nodes from flat blob
 * @blob: Flat device tree blob
 * @dad: Parent device node
 * @mynodes: The device tree created by the call
 *
 * Unflattens the device-tree passed by the firmware, creating the
 * tree of struct device_node. It also fills the "name" and "type"
 * pointers of the nodes so the normal device-tree walking functions
 * can be used.
 *
 * Return: NULL on failure or the memory chunk containing the unflattened
 * device tree on success.
 */
void *of_fdt_unflatten_tree(const unsigned long *blob,
			    struct device_node *dad,
			    struct device_node **mynodes)
{
	void *mem;

	mutex_lock(&of_fdt_unflatten_mutex);
	mem = __unflatten_device_tree(blob, dad, mynodes, &kernel_tree_alloc,
				      true);
	mutex_unlock(&of_fdt_unflatten_mutex);

	return mem;
}
EXPORT_SYMBOL_GPL(of_fdt_unflatten_tree);

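/*
 * Illustrative use of the exported helper above (the blob pointer and the
 * error handling are hypothetical, not taken from an in-tree caller):
 *
 *	struct device_node *overlay_root;
 *	void *mem;
 *
 *	mem = of_fdt_unflatten_tree(fdt_blob, NULL, &overlay_root);
 *	if (!mem || !overlay_root)
 *		return -EINVAL;
 *	// overlay_root carries OF_DETACHED and is not linked into of_root
 *
 * The returned nodes and properties live inside @mem, but property values
 * still point into the flattened blob, so the blob must stay resident for as
 * long as the unflattened tree is in use.
 */
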
/* Everything below here references initial_boot_params directly. */
int __initdata dt_root_addr_cells;
int __initdata dt_root_size_cells;

void *initial_boot_params __ro_after_init;

#ifdef CONFIG_OF_EARLY_FLATTREE

static u32 of_fdt_crc32;

/*
 * fdt_reserve_elfcorehdr() - reserves memory for elf core header
 *
 * This function reserves the memory occupied by an elf core header
 * described in the device tree. This region contains all the
 * information about the primary kernel's core image and is used by a dump
 * capture kernel to access the system memory of the primary kernel.
 */
static void __init fdt_reserve_elfcorehdr(void)
{
	if (!IS_ENABLED(CONFIG_CRASH_DUMP) || !elfcorehdr_size)
		return;

	if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
		pr_warn("elfcorehdr is overlapped\n");
		return;
	}

	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);

	pr_info("Reserving %llu KiB of memory at 0x%llx for elfcorehdr\n",
		elfcorehdr_size >> 10, elfcorehdr_addr);
}

/**
 * early_init_fdt_scan_reserved_mem() - create reserved memory regions
 *
 * This function grabs memory from the early allocator for device-exclusive
 * use defined in device tree structures. It should be called by arch specific
 * code once the early allocator (i.e. memblock) has been fully activated.
 */
void __init early_init_fdt_scan_reserved_mem(void)
{
	int n;
	u64 base, size;

	if (!initial_boot_params)
		return;

	fdt_scan_reserved_mem();
	fdt_reserve_elfcorehdr();

	/* Process header /memreserve/ fields */
	for (n = 0; ; n++) {
		fdt_get_mem_rsv(initial_boot_params, n, &base, &size);
		if (!size)
			break;
		memblock_reserve(base, size);
	}
}

/**
 * early_init_fdt_reserve_self() - reserve the memory used by the FDT blob
 */
void __init early_init_fdt_reserve_self(void)
{
	if (!initial_boot_params)
		return;

	/* Reserve the dtb region */
	memblock_reserve(__pa(initial_boot_params),
			 fdt_totalsize(initial_boot_params));
}

/**
 * of_scan_flat_dt - scan flattened tree blob and call callback on each.
 * @it: callback function
 * @data: context data pointer
 *
 * This function is used to scan the flattened device-tree; it is
 * used to extract the memory information at boot before we can
 * unflatten the tree.
 */
int __init of_scan_flat_dt(int (*it)(unsigned long node,
				     const char *uname, int depth,
				     void *data),
			   void *data)
{
	const void *blob = initial_boot_params;
	const char *pathp;
	int offset, rc = 0, depth = -1;

	if (!blob)
		return 0;

	for (offset = fdt_next_node(blob, -1, &depth);
	     offset >= 0 && depth >= 0 && !rc;
	     offset = fdt_next_node(blob, offset, &depth)) {

		pathp = fdt_get_name(blob, offset, NULL);
		rc = it(offset, pathp, depth, data);
	}
	return rc;
}

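/*
 * A minimal sketch of an of_scan_flat_dt() callback (the callback name and
 * the compatible string are made up for illustration). A non-zero return
 * value stops the scan and is propagated back to the caller:
 *
 *	static int __init early_find_foo(unsigned long node, const char *uname,
 *					 int depth, void *data)
 *	{
 *		if (!of_flat_dt_is_compatible(node, "acme,foo"))
 *			return 0;		// keep scanning
 *		*(unsigned long *)data = node;
 *		return 1;			// found it, stop
 *	}
 *
 *	unsigned long found = 0;
 *	of_scan_flat_dt(early_find_foo, &found);
 */
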
/**
 * of_scan_flat_dt_subnodes - scan sub-nodes of a node and call callback on each.
 * @parent: parent node
 * @it: callback function
 * @data: context data pointer
 *
 * This function is used to scan sub-nodes of a node.
 */
int __init of_scan_flat_dt_subnodes(unsigned long parent,
				    int (*it)(unsigned long node,
					      const char *uname,
					      void *data),
				    void *data)
{
	const void *blob = initial_boot_params;
	int node;

	fdt_for_each_subnode(node, blob, parent) {
		const char *pathp;
		int rc;

		pathp = fdt_get_name(blob, node, NULL);
		rc = it(node, pathp, data);
		if (rc)
			return rc;
	}
	return 0;
}

/**
 * of_get_flat_dt_subnode_by_name - get the subnode by given name
 *
 * @node: the parent node
 * @uname: the name of the subnode
 * @return offset of the subnode, or -FDT_ERR_NOTFOUND if there is none
 */
int __init of_get_flat_dt_subnode_by_name(unsigned long node, const char *uname)
{
	return fdt_subnode_offset(initial_boot_params, node, uname);
}

/*
 * of_get_flat_dt_root - find the root node in the flat blob
 */
unsigned long __init of_get_flat_dt_root(void)
{
	return 0;
}

/*
 * of_get_flat_dt_prop - Given a node in the flat blob, return the property ptr
 *
 * This function can be used within an of_scan_flat_dt() callback to get
 * access to properties.
 */
const void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
				       int *size)
{
	return fdt_getprop(initial_boot_params, node, name, size);
}

/**
 * of_fdt_is_compatible - Return true if given node from the given blob has
 * compat in its compatible list
 * @blob: A device tree blob
 * @node: node to test
 * @compat: compatible string to compare with compatible list.
 *
 * Return: a non-zero value on match with smaller values returned for more
 * specific compatible values.
 */
static int of_fdt_is_compatible(const void *blob,
				unsigned long node, const char *compat)
{
	const char *cp;
	int cplen;
	unsigned long l, score = 0;

	cp = fdt_getprop(blob, node, "compatible", &cplen);
	if (cp == NULL)
		return 0;
	while (cplen > 0) {
		score++;
		if (of_compat_cmp(cp, compat, strlen(compat)) == 0)
			return score;
		l = strlen(cp) + 1;
		cp += l;
		cplen -= l;
	}

	return 0;
}

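/*
 * Scoring example for of_fdt_is_compatible() above (a made-up node, for
 * illustration only). Given a node with:
 *
 *	compatible = "vendor,board-v2", "vendor,soc";
 *
 * a lookup for "vendor,board-v2" returns 1, "vendor,soc" returns 2 and
 * anything else returns 0. Lower non-zero scores therefore mean a more
 * specific match, which is what of_flat_dt_match() and
 * of_flat_dt_match_machine() below rely on.
 */
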
/**
 * of_flat_dt_is_compatible - Return true if given node has compat in compatible list
 * @node: node to test
 * @compat: compatible string to compare with compatible list.
 */
int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
{
	return of_fdt_is_compatible(initial_boot_params, node, compat);
}

/*
 * of_flat_dt_match - Return true if node matches a list of compatible values
 */
static int __init of_flat_dt_match(unsigned long node, const char *const *compat)
{
	unsigned int tmp, score = 0;

	if (!compat)
		return 0;

	while (*compat) {
		tmp = of_fdt_is_compatible(initial_boot_params, node, *compat);
		if (tmp && (score == 0 || (tmp < score)))
			score = tmp;
		compat++;
	}

	return score;
}

/*
 * of_get_flat_dt_phandle - Given a node in the flat blob, return the phandle
 */
uint32_t __init of_get_flat_dt_phandle(unsigned long node)
{
	return fdt_get_phandle(initial_boot_params, node);
}

const char * __init of_flat_dt_get_machine_name(void)
{
	const char *name;
	unsigned long dt_root = of_get_flat_dt_root();

	name = of_get_flat_dt_prop(dt_root, "model", NULL);
	if (!name)
		name = of_get_flat_dt_prop(dt_root, "compatible", NULL);
	return name;
}

/**
 * of_flat_dt_match_machine - Iterate match tables to find matching machine.
 *
 * @default_match: A machine specific ptr to return in case of no match.
 * @get_next_compat: callback function to return next compatible match table.
 *
 * Iterate through machine match tables to find the best match for the machine
 * compatible string in the FDT.
 */
const void * __init of_flat_dt_match_machine(const void *default_match,
		const void * (*get_next_compat)(const char * const**))
{
	const void *data = NULL;
	const void *best_data = default_match;
	const char *const *compat;
	unsigned long dt_root;
	unsigned int best_score = ~1, score = 0;

	dt_root = of_get_flat_dt_root();
	while ((data = get_next_compat(&compat))) {
		score = of_flat_dt_match(dt_root, compat);
		if (score > 0 && score < best_score) {
			best_data = data;
			best_score = score;
		}
	}
	if (!best_data) {
		const char *prop;
		int size;

		pr_err("\n unrecognized device tree list:\n[ ");

		prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
		if (prop) {
			while (size > 0) {
				printk("'%s' ", prop);
				size -= strlen(prop) + 1;
				prop += strlen(prop) + 1;
			}
		}
		printk("]\n\n");
		return NULL;
	}

	pr_info("Machine model: %s\n", of_flat_dt_get_machine_name());

	return best_data;
}

static void __early_init_dt_declare_initrd(unsigned long start,
					   unsigned long end)
{
	/*
	 * __va() is not yet available this early on some platforms. In that
	 * case, the platform uses phys_initrd_start/phys_initrd_size instead
	 * and does the VA conversion itself.
	 */
	if (!IS_ENABLED(CONFIG_ARM64) &&
	    !(IS_ENABLED(CONFIG_RISCV) && IS_ENABLED(CONFIG_64BIT))) {
		initrd_start = (unsigned long)__va(start);
		initrd_end = (unsigned long)__va(end);
		initrd_below_start_ok = 1;
	}
}

/**
 * early_init_dt_check_for_initrd - Decode initrd location from flat tree
 * @node: reference to node containing initrd location ('chosen')
 */
static void __init early_init_dt_check_for_initrd(unsigned long node)
{
	u64 start, end;
	int len;
	const __be32 *prop;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
		return;

	pr_debug("Looking for initrd properties... ");

	prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len);
	if (!prop)
		return;
	start = of_read_number(prop, len/4);

	prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len);
	if (!prop)
		return;
	end = of_read_number(prop, len/4);
	if (start > end)
		return;

	__early_init_dt_declare_initrd(start, end);
	phys_initrd_start = start;
	phys_initrd_size = end - start;

	pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n", start, end);
}

/**
 * early_init_dt_check_for_elfcorehdr - Decode elfcorehdr location from flat
 * tree
 * @node: reference to node containing elfcorehdr location ('chosen')
 */
static void __init early_init_dt_check_for_elfcorehdr(unsigned long node)
{
	const __be32 *prop;
	int len;

	if (!IS_ENABLED(CONFIG_CRASH_DUMP))
		return;

	pr_debug("Looking for elfcorehdr property... ");

	prop = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
	if (!prop || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return;

	elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &prop);
	elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &prop);

	pr_debug("elfcorehdr_start=0x%llx elfcorehdr_size=0x%llx\n",
		 elfcorehdr_addr, elfcorehdr_size);
}

static unsigned long chosen_node_offset = -FDT_ERR_NOTFOUND;

/*
 * The main usage of linux,usable-memory-range is for the crash dump kernel.
 * Originally there was a single usable-memory region; now there may be two,
 * a low region and a high region.
 * To maintain compatibility with existing user space and older kdump, the low
 * region is always the last range of linux,usable-memory-range if it exists.
 */
#define MAX_USABLE_RANGES 2

/**
 * early_init_dt_check_for_usable_mem_range - Decode usable memory range
 * location from flat tree
 */
void __init early_init_dt_check_for_usable_mem_range(void)
{
	struct memblock_region rgn[MAX_USABLE_RANGES] = {0};
	const __be32 *prop, *endp;
	int len, i;
	unsigned long node = chosen_node_offset;

	if ((long)node < 0)
		return;

	pr_debug("Looking for usable-memory-range property... ");

	prop = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
	if (!prop || (len % (dt_root_addr_cells + dt_root_size_cells)))
		return;

	endp = prop + (len / sizeof(__be32));
	for (i = 0; i < MAX_USABLE_RANGES && prop < endp; i++) {
		rgn[i].base = dt_mem_next_cell(dt_root_addr_cells, &prop);
		rgn[i].size = dt_mem_next_cell(dt_root_size_cells, &prop);

		pr_debug("cap_mem_regions[%d]: base=%pa, size=%pa\n",
			 i, &rgn[i].base, &rgn[i].size);
	}

	memblock_cap_memory_range(rgn[0].base, rgn[0].size);
	for (i = 1; i < MAX_USABLE_RANGES && rgn[i].size; i++)
		memblock_add(rgn[i].base, rgn[i].size);
}

#ifdef CONFIG_SERIAL_EARLYCON

int __init early_init_dt_scan_chosen_stdout(void)
{
	int offset;
	const char *p, *q, *options = NULL;
	int l;
	const struct earlycon_id *match;
	const void *fdt = initial_boot_params;
	int ret;

	offset = fdt_path_offset(fdt, "/chosen");
	if (offset < 0)
		offset = fdt_path_offset(fdt, "/chosen@0");
	if (offset < 0)
		return -ENOENT;

	p = fdt_getprop(fdt, offset, "stdout-path", &l);
	if (!p)
		p = fdt_getprop(fdt, offset, "linux,stdout-path", &l);
	if (!p || !l)
		return -ENOENT;

	q = strchrnul(p, ':');
	if (*q != '\0')
		options = q + 1;
	l = q - p;

	/* Get the node specified by stdout-path */
	offset = fdt_path_offset_namelen(fdt, p, l);
	if (offset < 0) {
		pr_warn("earlycon: stdout-path %.*s not found\n", l, p);
		return 0;
	}

	for (match = __earlycon_table; match < __earlycon_table_end; match++) {
		if (!match->compatible[0])
			continue;

		if (fdt_node_check_compatible(fdt, offset, match->compatible))
			continue;

		ret = of_setup_earlycon(match, offset, options);
		if (!ret || ret == -EALREADY)
			return 0;
	}
	return -ENODEV;
}
#endif

/*
 * early_init_dt_scan_root - fetch the top level address and size cells
 */
int __init early_init_dt_scan_root(void)
{
	const __be32 *prop;
	const void *fdt = initial_boot_params;
	int node = fdt_path_offset(fdt, "/");

	if (node < 0)
		return -ENODEV;

	dt_root_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
	dt_root_addr_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;

	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
	if (prop)
		dt_root_size_cells = be32_to_cpup(prop);
	pr_debug("dt_root_size_cells = %x\n", dt_root_size_cells);

	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
	if (prop)
		dt_root_addr_cells = be32_to_cpup(prop);
	pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells);

	return 0;
}

u64 __init dt_mem_next_cell(int s, const __be32 **cellp)
{
	const __be32 *p = *cellp;

	*cellp = p + s;
	return of_read_number(p, s);
}

/*
 * early_init_dt_scan_memory - Look for and parse memory nodes
 */
int __init early_init_dt_scan_memory(void)
{
	int node, found_memory = 0;
	const void *fdt = initial_boot_params;

	fdt_for_each_subnode(node, fdt, 0) {
		const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
		const __be32 *reg, *endp;
		int l;
		bool hotpluggable;

		/* We are scanning "memory" nodes only */
		if (type == NULL || strcmp(type, "memory") != 0)
			continue;

		if (!of_fdt_device_is_available(fdt, node))
			continue;

		reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
		if (reg == NULL)
			reg = of_get_flat_dt_prop(node, "reg", &l);
		if (reg == NULL)
			continue;

		endp = reg + (l / sizeof(__be32));
		hotpluggable = of_get_flat_dt_prop(node, "hotpluggable", NULL);

		pr_debug("memory scan node %s, reg size %d,\n",
			 fdt_get_name(fdt, node, NULL), l);

		while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
			u64 base, size;

			base = dt_mem_next_cell(dt_root_addr_cells, &reg);
			size = dt_mem_next_cell(dt_root_size_cells, &reg);

			if (size == 0)
				continue;
			pr_debug(" - %llx, %llx\n", base, size);

			early_init_dt_add_memory_arch(base, size);

			found_memory = 1;

			if (!hotpluggable)
				continue;

			if (memblock_mark_hotplug(base, size))
				pr_warn("failed to mark hotplug range 0x%llx - 0x%llx\n",
					base, base + size);
		}
	}
	return found_memory;
}

int __init early_init_dt_scan_chosen(char *cmdline)
{
	int l, node;
	const char *p;
	const void *rng_seed;
	const void *fdt = initial_boot_params;

	node = fdt_path_offset(fdt, "/chosen");
	if (node < 0)
		node = fdt_path_offset(fdt, "/chosen@0");
	if (node < 0)
		/* Handle the cmdline config options even if no /chosen node */
		goto handle_cmdline;

	chosen_node_offset = node;

	early_init_dt_check_for_initrd(node);
	early_init_dt_check_for_elfcorehdr(node);

	rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l);
	if (rng_seed && l > 0) {
		add_bootloader_randomness(rng_seed, l);

		/* try to clear seed so it won't be found. */
		fdt_nop_property(initial_boot_params, node, "rng-seed");

		/* update CRC check value */
		of_fdt_crc32 = crc32_be(~0, initial_boot_params,
					fdt_totalsize(initial_boot_params));
	}

	/* Retrieve command line */
	p = of_get_flat_dt_prop(node, "bootargs", &l);
	if (p != NULL && l > 0)
		strscpy(cmdline, p, min(l, COMMAND_LINE_SIZE));

handle_cmdline:
	/*
	 * CONFIG_CMDLINE is meant to be a default in case nothing else
	 * managed to set the command line, unless CONFIG_CMDLINE_FORCE
	 * is set in which case we override whatever was found earlier.
	 */
#ifdef CONFIG_CMDLINE
#if defined(CONFIG_CMDLINE_EXTEND)
	strlcat(cmdline, " ", COMMAND_LINE_SIZE);
	strlcat(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#elif defined(CONFIG_CMDLINE_FORCE)
	strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#else
	/* No arguments from boot loader, use kernel's cmdline */
	if (!((char *)cmdline)[0])
		strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif
#endif /* CONFIG_CMDLINE */

	pr_debug("Command line is: %s\n", (char *)cmdline);

	return 0;
}

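/*
 * Example /chosen node consumed by early_init_dt_scan_chosen() above (all
 * values are illustrative):
 *
 *	chosen {
 *		bootargs = "console=ttyS0,115200 root=/dev/ram0";
 *		rng-seed = [de ad be ef 01 02 03 04];
 *		linux,initrd-start = <0x48000000>;
 *		linux,initrd-end = <0x48800000>;
 *	};
 *
 * The rng-seed property is NOPed out of the blob after being fed to
 * add_bootloader_randomness(), and of_fdt_crc32 is recomputed so the
 * /sys/firmware/fdt CRC check still passes.
 */
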
#ifndef MIN_MEMBLOCK_ADDR
#define MIN_MEMBLOCK_ADDR	__pa(PAGE_OFFSET)
#endif
#ifndef MAX_MEMBLOCK_ADDR
#define MAX_MEMBLOCK_ADDR	((phys_addr_t)~0)
#endif

void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
{
	const u64 phys_offset = MIN_MEMBLOCK_ADDR;

	if (size < PAGE_SIZE - (base & ~PAGE_MASK)) {
		pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
			base, base + size);
		return;
	}

	if (!PAGE_ALIGNED(base)) {
		size -= PAGE_SIZE - (base & ~PAGE_MASK);
		base = PAGE_ALIGN(base);
	}
	size &= PAGE_MASK;

	if (base > MAX_MEMBLOCK_ADDR) {
		pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
			base, base + size);
		return;
	}

	if (base + size - 1 > MAX_MEMBLOCK_ADDR) {
		pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
			((u64)MAX_MEMBLOCK_ADDR) + 1, base + size);
		size = MAX_MEMBLOCK_ADDR - base + 1;
	}

	if (base + size < phys_offset) {
		pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
			base, base + size);
		return;
	}
	if (base < phys_offset) {
		pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
			base, phys_offset);
		size -= phys_offset - base;
		base = phys_offset;
	}
	memblock_add(base, size);
}

static void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
	void *ptr = memblock_alloc(size, align);

	if (!ptr)
		panic("%s: Failed to allocate %llu bytes align=0x%llx\n",
		      __func__, size, align);

	return ptr;
}

bool __init early_init_dt_verify(void *params)
{
	if (!params)
		return false;

	/* check device tree validity */
	if (fdt_check_header(params))
		return false;

	/* Setup flat device-tree pointer */
	initial_boot_params = params;
	of_fdt_crc32 = crc32_be(~0, initial_boot_params,
				fdt_totalsize(initial_boot_params));

	/* Initialize {size,address}-cells info */
	early_init_dt_scan_root();

	return true;
}

void __init early_init_dt_scan_nodes(void)
{
	int rc;

	/* Retrieve various information from the /chosen node */
	rc = early_init_dt_scan_chosen(boot_command_line);
	if (rc)
		pr_warn("No chosen node found, continuing without\n");

	/* Setup memory, calling early_init_dt_add_memory_arch */
	early_init_dt_scan_memory();

	/* Handle linux,usable-memory-range property */
	early_init_dt_check_for_usable_mem_range();
}

bool __init early_init_dt_scan(void *params)
{
	bool status;

	status = early_init_dt_verify(params);
	if (!status)
		return false;

	early_init_dt_scan_nodes();
	return true;
}

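/*
 * Typical use of early_init_dt_scan() above from architecture setup code, as
 * a sketch only: the variable name and error handling are illustrative, and
 * an architecture may instead call early_init_dt_verify() and
 * early_init_dt_scan_nodes() separately.
 *
 *	void __init setup_arch(char **cmdline_p)
 *	{
 *		...
 *		if (!early_init_dt_scan(dtb_virt))
 *			panic("Invalid device tree blob\n");
 *		...
 *	}
 */
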
static void *__init copy_device_tree(void *fdt)
{
	int size;
	void *dt;

	size = fdt_totalsize(fdt);
	dt = early_init_dt_alloc_memory_arch(size,
					     roundup_pow_of_two(FDT_V17_SIZE));

	if (dt)
		memcpy(dt, fdt, size);

	return dt;
}

/**
 * unflatten_device_tree - create tree of device_nodes from flat blob
 *
 * Unflattens the device-tree passed by the firmware, creating the
 * tree of struct device_node. It also fills the "name" and "type"
 * pointers of the nodes so the normal device-tree walking functions
 * can be used.
 */
void __init unflatten_device_tree(void)
{
	void *fdt = initial_boot_params;

	/* Save the statically-placed regions in the reserved_mem array */
	fdt_scan_reserved_mem_reg_nodes();

	/* Don't use the bootloader provided DTB if ACPI is enabled */
	if (!acpi_disabled)
		fdt = NULL;

	/*
	 * Populate an empty root node when ACPI is enabled or bootloader
	 * doesn't provide one.
	 */
	if (!fdt) {
		fdt = (void *) __dtb_empty_root_begin;
		/* fdt_totalsize() will be used for copy size */
		if (fdt_totalsize(fdt) >
		    __dtb_empty_root_end - __dtb_empty_root_begin) {
			pr_err("invalid size in dtb_empty_root\n");
			return;
		}
		of_fdt_crc32 = crc32_be(~0, fdt, fdt_totalsize(fdt));
		fdt = copy_device_tree(fdt);
	}

	__unflatten_device_tree(fdt, NULL, &of_root,
				early_init_dt_alloc_memory_arch, false);

	/* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */
	of_alias_scan(early_init_dt_alloc_memory_arch);

	unittest_unflatten_overlay_base();
}

/**
 * unflatten_and_copy_device_tree - copy and create tree of device_nodes from flat blob
 *
 * Copies and unflattens the device-tree passed by the firmware, creating the
 * tree of struct device_node. It also fills the "name" and "type"
 * pointers of the nodes so the normal device-tree walking functions
 * can be used. This should only be used when the FDT memory has not been
 * reserved, such as when the FDT is built into the kernel init section.
 * If the FDT memory is already reserved then unflatten_device_tree()
 * should be used instead.
 */
void __init unflatten_and_copy_device_tree(void)
{
	if (initial_boot_params)
		initial_boot_params = copy_device_tree(initial_boot_params);

	unflatten_device_tree();
}

#ifdef CONFIG_SYSFS
static ssize_t of_fdt_raw_read(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr,
			       char *buf, loff_t off, size_t count)
{
	memcpy(buf, initial_boot_params + off, count);
	return count;
}

static int __init of_fdt_raw_init(void)
{
	static struct bin_attribute of_fdt_raw_attr =
		__BIN_ATTR(fdt, S_IRUSR, of_fdt_raw_read, NULL, 0);

	if (!initial_boot_params)
		return 0;

	if (of_fdt_crc32 != crc32_be(~0, initial_boot_params,
				     fdt_totalsize(initial_boot_params))) {
		pr_warn("not creating '/sys/firmware/fdt': CRC check failed\n");
		return 0;
	}
	of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
	return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
}
late_initcall(of_fdt_raw_init);
#endif

#endif /* CONFIG_OF_EARLY_FLATTREE */