1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Functions for working with the Flattened Device Tree data format 4 * 5 * Copyright 2009 Benjamin Herrenschmidt, IBM Corp 6 * benh@kernel.crashing.org 7 */ 8 9 #define pr_fmt(fmt) "OF: fdt: " fmt 10 11 #include <linux/crash_dump.h> 12 #include <linux/crc32.h> 13 #include <linux/kernel.h> 14 #include <linux/initrd.h> 15 #include <linux/memblock.h> 16 #include <linux/mutex.h> 17 #include <linux/of.h> 18 #include <linux/of_fdt.h> 19 #include <linux/sizes.h> 20 #include <linux/string.h> 21 #include <linux/errno.h> 22 #include <linux/slab.h> 23 #include <linux/libfdt.h> 24 #include <linux/debugfs.h> 25 #include <linux/serial_core.h> 26 #include <linux/sysfs.h> 27 #include <linux/random.h> 28 #include <linux/kexec_handover.h> 29 30 #include <asm/setup.h> /* for COMMAND_LINE_SIZE */ 31 #include <asm/page.h> 32 33 #include "of_private.h" 34 35 /* 36 * __dtb_empty_root_begin[] and __dtb_empty_root_end[] magically created by 37 * cmd_wrap_S_dtb in scripts/Makefile.dtbs 38 */ 39 extern uint8_t __dtb_empty_root_begin[]; 40 extern uint8_t __dtb_empty_root_end[]; 41 42 /* 43 * of_fdt_limit_memory - limit the number of regions in the /memory node 44 * @limit: maximum entries 45 * 46 * Adjust the flattened device tree to have at most 'limit' number of 47 * memory entries in the /memory node. This function may be called 48 * any time after initial_boot_param is set. 49 */ 50 void __init of_fdt_limit_memory(int limit) 51 { 52 int memory; 53 int len; 54 const void *val; 55 int cell_size = sizeof(uint32_t)*(dt_root_addr_cells + dt_root_size_cells); 56 57 memory = fdt_path_offset(initial_boot_params, "/memory"); 58 if (memory > 0) { 59 val = fdt_getprop(initial_boot_params, memory, "reg", &len); 60 if (len > limit*cell_size) { 61 len = limit*cell_size; 62 pr_debug("Limiting number of entries to %d\n", limit); 63 fdt_setprop(initial_boot_params, memory, "reg", val, 64 len); 65 } 66 } 67 } 68 69 bool of_fdt_device_is_available(const void *blob, unsigned long node) 70 { 71 const char *status = fdt_getprop(blob, node, "status", NULL); 72 73 if (!status) 74 return true; 75 76 if (!strcmp(status, "ok") || !strcmp(status, "okay")) 77 return true; 78 79 return false; 80 } 81 82 static void *unflatten_dt_alloc(void **mem, unsigned long size, 83 unsigned long align) 84 { 85 void *res; 86 87 *mem = PTR_ALIGN(*mem, align); 88 res = *mem; 89 *mem += size; 90 91 return res; 92 } 93 94 static void populate_properties(const void *blob, 95 int offset, 96 void **mem, 97 struct device_node *np, 98 const char *nodename, 99 bool dryrun) 100 { 101 struct property *pp, **pprev = NULL; 102 int cur; 103 bool has_name = false; 104 105 pprev = &np->properties; 106 for (cur = fdt_first_property_offset(blob, offset); 107 cur >= 0; 108 cur = fdt_next_property_offset(blob, cur)) { 109 const __be32 *val; 110 const char *pname; 111 u32 sz; 112 113 val = fdt_getprop_by_offset(blob, cur, &pname, &sz); 114 if (!val) { 115 pr_warn("Cannot locate property at 0x%x\n", cur); 116 continue; 117 } 118 119 if (!pname) { 120 pr_warn("Cannot find property name at 0x%x\n", cur); 121 continue; 122 } 123 124 if (!strcmp(pname, "name")) 125 has_name = true; 126 127 pp = unflatten_dt_alloc(mem, sizeof(struct property), 128 __alignof__(struct property)); 129 if (dryrun) 130 continue; 131 132 /* We accept flattened tree phandles either in 133 * ePAPR-style "phandle" properties, or the 134 * legacy "linux,phandle" properties. If both 135 * appear and have different values, things 136 * will get weird. Don't do that. 137 */ 138 if (!strcmp(pname, "phandle") || 139 !strcmp(pname, "linux,phandle")) { 140 if (!np->phandle) 141 np->phandle = be32_to_cpup(val); 142 } 143 144 /* And we process the "ibm,phandle" property 145 * used in pSeries dynamic device tree 146 * stuff 147 */ 148 if (!strcmp(pname, "ibm,phandle")) 149 np->phandle = be32_to_cpup(val); 150 151 pp->name = (char *)pname; 152 pp->length = sz; 153 pp->value = (__be32 *)val; 154 *pprev = pp; 155 pprev = &pp->next; 156 } 157 158 /* With version 0x10 we may not have the name property, 159 * recreate it here from the unit name if absent 160 */ 161 if (!has_name) { 162 const char *p = nodename, *ps = p, *pa = NULL; 163 int len; 164 165 while (*p) { 166 if ((*p) == '@') 167 pa = p; 168 else if ((*p) == '/') 169 ps = p + 1; 170 p++; 171 } 172 173 if (pa < ps) 174 pa = p; 175 len = (pa - ps) + 1; 176 pp = unflatten_dt_alloc(mem, sizeof(struct property) + len, 177 __alignof__(struct property)); 178 if (!dryrun) { 179 pp->name = "name"; 180 pp->length = len; 181 pp->value = pp + 1; 182 *pprev = pp; 183 memcpy(pp->value, ps, len - 1); 184 ((char *)pp->value)[len - 1] = 0; 185 pr_debug("fixed up name for %s -> %s\n", 186 nodename, (char *)pp->value); 187 } 188 } 189 } 190 191 static int populate_node(const void *blob, 192 int offset, 193 void **mem, 194 struct device_node *dad, 195 struct device_node **pnp, 196 bool dryrun) 197 { 198 struct device_node *np; 199 const char *pathp; 200 int len; 201 202 pathp = fdt_get_name(blob, offset, &len); 203 if (!pathp) { 204 *pnp = NULL; 205 return len; 206 } 207 208 len++; 209 210 np = unflatten_dt_alloc(mem, sizeof(struct device_node) + len, 211 __alignof__(struct device_node)); 212 if (!dryrun) { 213 char *fn; 214 of_node_init(np); 215 np->full_name = fn = ((char *)np) + sizeof(*np); 216 217 memcpy(fn, pathp, len); 218 219 if (dad != NULL) { 220 np->parent = dad; 221 np->sibling = dad->child; 222 dad->child = np; 223 } 224 } 225 226 populate_properties(blob, offset, mem, np, pathp, dryrun); 227 if (!dryrun) { 228 np->name = of_get_property(np, "name", NULL); 229 if (!np->name) 230 np->name = "<NULL>"; 231 } 232 233 *pnp = np; 234 return 0; 235 } 236 237 static void reverse_nodes(struct device_node *parent) 238 { 239 struct device_node *child, *next; 240 241 /* In-depth first */ 242 child = parent->child; 243 while (child) { 244 reverse_nodes(child); 245 246 child = child->sibling; 247 } 248 249 /* Reverse the nodes in the child list */ 250 child = parent->child; 251 parent->child = NULL; 252 while (child) { 253 next = child->sibling; 254 255 child->sibling = parent->child; 256 parent->child = child; 257 child = next; 258 } 259 } 260 261 /** 262 * unflatten_dt_nodes - Alloc and populate a device_node from the flat tree 263 * @blob: The parent device tree blob 264 * @mem: Memory chunk to use for allocating device nodes and properties 265 * @dad: Parent struct device_node 266 * @nodepp: The device_node tree created by the call 267 * 268 * Return: The size of unflattened device tree or error code 269 */ 270 static int unflatten_dt_nodes(const void *blob, 271 void *mem, 272 struct device_node *dad, 273 struct device_node **nodepp) 274 { 275 struct device_node *root; 276 int offset = 0, depth = 0, initial_depth = 0; 277 #define FDT_MAX_DEPTH 64 278 struct device_node *nps[FDT_MAX_DEPTH]; 279 void *base = mem; 280 bool dryrun = !base; 281 int ret; 282 283 if (nodepp) 284 *nodepp = NULL; 285 286 /* 287 * We're unflattening device sub-tree if @dad is valid. There are 288 * possibly multiple nodes in the first level of depth. We need 289 * set @depth to 1 to make fdt_next_node() happy as it bails 290 * immediately when negative @depth is found. Otherwise, the device 291 * nodes except the first one won't be unflattened successfully. 292 */ 293 if (dad) 294 depth = initial_depth = 1; 295 296 root = dad; 297 nps[depth] = dad; 298 299 for (offset = 0; 300 offset >= 0 && depth >= initial_depth; 301 offset = fdt_next_node(blob, offset, &depth)) { 302 if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH - 1)) 303 continue; 304 305 if (!IS_ENABLED(CONFIG_OF_KOBJ) && 306 !of_fdt_device_is_available(blob, offset)) 307 continue; 308 309 ret = populate_node(blob, offset, &mem, nps[depth], 310 &nps[depth+1], dryrun); 311 if (ret < 0) 312 return ret; 313 314 if (!dryrun && nodepp && !*nodepp) 315 *nodepp = nps[depth+1]; 316 if (!dryrun && !root) 317 root = nps[depth+1]; 318 } 319 320 if (offset < 0 && offset != -FDT_ERR_NOTFOUND) { 321 pr_err("Error %d processing FDT\n", offset); 322 return -EINVAL; 323 } 324 325 /* 326 * Reverse the child list. Some drivers assumes node order matches .dts 327 * node order 328 */ 329 if (!dryrun) 330 reverse_nodes(root); 331 332 return mem - base; 333 } 334 335 /** 336 * __unflatten_device_tree - create tree of device_nodes from flat blob 337 * @blob: The blob to expand 338 * @dad: Parent device node 339 * @mynodes: The device_node tree created by the call 340 * @dt_alloc: An allocator that provides a virtual address to memory 341 * for the resulting tree 342 * @detached: if true set OF_DETACHED on @mynodes 343 * 344 * unflattens a device-tree, creating the tree of struct device_node. It also 345 * fills the "name" and "type" pointers of the nodes so the normal device-tree 346 * walking functions can be used. 347 * 348 * Return: NULL on failure or the memory chunk containing the unflattened 349 * device tree on success. 350 */ 351 void *__unflatten_device_tree(const void *blob, 352 struct device_node *dad, 353 struct device_node **mynodes, 354 void *(*dt_alloc)(u64 size, u64 align), 355 bool detached) 356 { 357 int size; 358 void *mem; 359 int ret; 360 361 if (mynodes) 362 *mynodes = NULL; 363 364 pr_debug(" -> unflatten_device_tree()\n"); 365 366 if (!blob) { 367 pr_debug("No device tree pointer\n"); 368 return NULL; 369 } 370 371 pr_debug("Unflattening device tree:\n"); 372 pr_debug("magic: %08x\n", fdt_magic(blob)); 373 pr_debug("size: %08x\n", fdt_totalsize(blob)); 374 pr_debug("version: %08x\n", fdt_version(blob)); 375 376 if (fdt_check_header(blob)) { 377 pr_err("Invalid device tree blob header\n"); 378 return NULL; 379 } 380 381 /* First pass, scan for size */ 382 size = unflatten_dt_nodes(blob, NULL, dad, NULL); 383 if (size <= 0) 384 return NULL; 385 386 size = ALIGN(size, 4); 387 pr_debug(" size is %d, allocating...\n", size); 388 389 /* Allocate memory for the expanded device tree */ 390 mem = dt_alloc(size + 4, __alignof__(struct device_node)); 391 if (!mem) 392 return NULL; 393 394 memset(mem, 0, size); 395 396 *(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef); 397 398 pr_debug(" unflattening %p...\n", mem); 399 400 /* Second pass, do actual unflattening */ 401 ret = unflatten_dt_nodes(blob, mem, dad, mynodes); 402 403 if (be32_to_cpup(mem + size) != 0xdeadbeef) 404 pr_warn("End of tree marker overwritten: %08x\n", 405 be32_to_cpup(mem + size)); 406 407 if (ret <= 0) 408 return NULL; 409 410 if (detached && mynodes && *mynodes) { 411 of_node_set_flag(*mynodes, OF_DETACHED); 412 pr_debug("unflattened tree is detached\n"); 413 } 414 415 pr_debug(" <- unflatten_device_tree()\n"); 416 return mem; 417 } 418 419 static void *kernel_tree_alloc(u64 size, u64 align) 420 { 421 return kzalloc(size, GFP_KERNEL); 422 } 423 424 static DEFINE_MUTEX(of_fdt_unflatten_mutex); 425 426 /** 427 * of_fdt_unflatten_tree - create tree of device_nodes from flat blob 428 * @blob: Flat device tree blob 429 * @dad: Parent device node 430 * @mynodes: The device tree created by the call 431 * 432 * unflattens the device-tree passed by the firmware, creating the 433 * tree of struct device_node. It also fills the "name" and "type" 434 * pointers of the nodes so the normal device-tree walking functions 435 * can be used. 436 * 437 * Return: NULL on failure or the memory chunk containing the unflattened 438 * device tree on success. 439 */ 440 void *of_fdt_unflatten_tree(const unsigned long *blob, 441 struct device_node *dad, 442 struct device_node **mynodes) 443 { 444 void *mem; 445 446 mutex_lock(&of_fdt_unflatten_mutex); 447 mem = __unflatten_device_tree(blob, dad, mynodes, &kernel_tree_alloc, 448 true); 449 mutex_unlock(&of_fdt_unflatten_mutex); 450 451 return mem; 452 } 453 EXPORT_SYMBOL_GPL(of_fdt_unflatten_tree); 454 455 /* Everything below here references initial_boot_params directly. */ 456 int __initdata dt_root_addr_cells; 457 int __initdata dt_root_size_cells; 458 459 void *initial_boot_params __ro_after_init; 460 phys_addr_t initial_boot_params_pa __ro_after_init; 461 462 #ifdef CONFIG_OF_EARLY_FLATTREE 463 464 static u32 of_fdt_crc32; 465 466 /* 467 * fdt_reserve_elfcorehdr() - reserves memory for elf core header 468 * 469 * This function reserves the memory occupied by an elf core header 470 * described in the device tree. This region contains all the 471 * information about primary kernel's core image and is used by a dump 472 * capture kernel to access the system memory on primary kernel. 473 */ 474 static void __init fdt_reserve_elfcorehdr(void) 475 { 476 if (!IS_ENABLED(CONFIG_CRASH_DUMP) || !elfcorehdr_size) 477 return; 478 479 if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) { 480 pr_warn("elfcorehdr is overlapped\n"); 481 return; 482 } 483 484 memblock_reserve(elfcorehdr_addr, elfcorehdr_size); 485 486 pr_info("Reserving %llu KiB of memory at 0x%llx for elfcorehdr\n", 487 elfcorehdr_size >> 10, elfcorehdr_addr); 488 } 489 490 /** 491 * early_init_fdt_scan_reserved_mem() - create reserved memory regions 492 * 493 * This function grabs memory from early allocator for device exclusive use 494 * defined in device tree structures. It should be called by arch specific code 495 * once the early allocator (i.e. memblock) has been fully activated. 496 */ 497 void __init early_init_fdt_scan_reserved_mem(void) 498 { 499 int n; 500 int res; 501 u64 base, size; 502 503 if (!initial_boot_params) 504 return; 505 506 fdt_scan_reserved_mem(); 507 fdt_reserve_elfcorehdr(); 508 509 /* Process header /memreserve/ fields */ 510 for (n = 0; ; n++) { 511 res = fdt_get_mem_rsv(initial_boot_params, n, &base, &size); 512 if (res) { 513 pr_err("Invalid memory reservation block index %d\n", n); 514 break; 515 } 516 if (!size) 517 break; 518 memblock_reserve(base, size); 519 } 520 } 521 522 /** 523 * early_init_fdt_reserve_self() - reserve the memory used by the FDT blob 524 */ 525 void __init early_init_fdt_reserve_self(void) 526 { 527 if (!initial_boot_params) 528 return; 529 530 /* Reserve the dtb region */ 531 memblock_reserve(__pa(initial_boot_params), 532 fdt_totalsize(initial_boot_params)); 533 } 534 535 /** 536 * of_scan_flat_dt - scan flattened tree blob and call callback on each. 537 * @it: callback function 538 * @data: context data pointer 539 * 540 * This function is used to scan the flattened device-tree, it is 541 * used to extract the memory information at boot before we can 542 * unflatten the tree 543 */ 544 int __init of_scan_flat_dt(int (*it)(unsigned long node, 545 const char *uname, int depth, 546 void *data), 547 void *data) 548 { 549 const void *blob = initial_boot_params; 550 const char *pathp; 551 int offset, rc = 0, depth = -1; 552 553 if (!blob) 554 return 0; 555 556 for (offset = fdt_next_node(blob, -1, &depth); 557 offset >= 0 && depth >= 0 && !rc; 558 offset = fdt_next_node(blob, offset, &depth)) { 559 560 pathp = fdt_get_name(blob, offset, NULL); 561 rc = it(offset, pathp, depth, data); 562 } 563 return rc; 564 } 565 566 /** 567 * of_scan_flat_dt_subnodes - scan sub-nodes of a node call callback on each. 568 * @parent: parent node 569 * @it: callback function 570 * @data: context data pointer 571 * 572 * This function is used to scan sub-nodes of a node. 573 */ 574 int __init of_scan_flat_dt_subnodes(unsigned long parent, 575 int (*it)(unsigned long node, 576 const char *uname, 577 void *data), 578 void *data) 579 { 580 const void *blob = initial_boot_params; 581 int node; 582 583 fdt_for_each_subnode(node, blob, parent) { 584 const char *pathp; 585 int rc; 586 587 pathp = fdt_get_name(blob, node, NULL); 588 rc = it(node, pathp, data); 589 if (rc) 590 return rc; 591 } 592 return 0; 593 } 594 595 /** 596 * of_get_flat_dt_subnode_by_name - get the subnode by given name 597 * 598 * @node: the parent node 599 * @uname: the name of subnode 600 * @return offset of the subnode, or -FDT_ERR_NOTFOUND if there is none 601 */ 602 603 int __init of_get_flat_dt_subnode_by_name(unsigned long node, const char *uname) 604 { 605 return fdt_subnode_offset(initial_boot_params, node, uname); 606 } 607 608 /* 609 * of_get_flat_dt_root - find the root node in the flat blob 610 */ 611 unsigned long __init of_get_flat_dt_root(void) 612 { 613 return 0; 614 } 615 616 /* 617 * of_get_flat_dt_prop - Given a node in the flat blob, return the property ptr 618 * 619 * This function can be used within scan_flattened_dt callback to get 620 * access to properties 621 */ 622 const void *__init of_get_flat_dt_prop(unsigned long node, const char *name, 623 int *size) 624 { 625 return fdt_getprop(initial_boot_params, node, name, size); 626 } 627 628 const __be32 *__init of_flat_dt_get_addr_size_prop(unsigned long node, 629 const char *name, 630 int *entries) 631 { 632 const __be32 *prop; 633 int len, elen = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32); 634 635 prop = of_get_flat_dt_prop(node, name, &len); 636 if (!prop || len % elen) { 637 *entries = 0; 638 return NULL; 639 } 640 641 *entries = len / elen; 642 return prop; 643 } 644 645 bool __init of_flat_dt_get_addr_size(unsigned long node, const char *name, 646 u64 *addr, u64 *size) 647 { 648 const __be32 *prop; 649 int entries; 650 651 prop = of_flat_dt_get_addr_size_prop(node, name, &entries); 652 if (!prop || entries != 1) 653 return false; 654 655 of_flat_dt_read_addr_size(prop, 0, addr, size); 656 return true; 657 } 658 659 void __init of_flat_dt_read_addr_size(const __be32 *prop, int entry_index, 660 u64 *addr, u64 *size) 661 { 662 int entry_cells = dt_root_addr_cells + dt_root_size_cells; 663 prop += entry_cells * entry_index; 664 665 *addr = dt_mem_next_cell(dt_root_addr_cells, &prop); 666 *size = dt_mem_next_cell(dt_root_size_cells, &prop); 667 } 668 669 /** 670 * of_fdt_is_compatible - Return true if given node from the given blob has 671 * compat in its compatible list 672 * @blob: A device tree blob 673 * @node: node to test 674 * @compat: compatible string to compare with compatible list. 675 * 676 * Return: a non-zero value on match with smaller values returned for more 677 * specific compatible values. 678 */ 679 static int of_fdt_is_compatible(const void *blob, 680 unsigned long node, const char *compat) 681 { 682 const char *cp; 683 int cplen; 684 unsigned long l, score = 0; 685 686 cp = fdt_getprop(blob, node, "compatible", &cplen); 687 if (cp == NULL) 688 return 0; 689 while (cplen > 0) { 690 score++; 691 if (of_compat_cmp(cp, compat, strlen(compat)) == 0) 692 return score; 693 l = strlen(cp) + 1; 694 cp += l; 695 cplen -= l; 696 } 697 698 return 0; 699 } 700 701 /** 702 * of_flat_dt_is_compatible - Return true if given node has compat in compatible list 703 * @node: node to test 704 * @compat: compatible string to compare with compatible list. 705 */ 706 int __init of_flat_dt_is_compatible(unsigned long node, const char *compat) 707 { 708 return of_fdt_is_compatible(initial_boot_params, node, compat); 709 } 710 711 /* 712 * of_flat_dt_match - Return true if node matches a list of compatible values 713 */ 714 static int __init of_flat_dt_match(unsigned long node, const char *const *compat) 715 { 716 unsigned int tmp, score = 0; 717 718 if (!compat) 719 return 0; 720 721 while (*compat) { 722 tmp = of_fdt_is_compatible(initial_boot_params, node, *compat); 723 if (tmp && (score == 0 || (tmp < score))) 724 score = tmp; 725 compat++; 726 } 727 728 return score; 729 } 730 731 /* 732 * of_get_flat_dt_phandle - Given a node in the flat blob, return the phandle 733 */ 734 uint32_t __init of_get_flat_dt_phandle(unsigned long node) 735 { 736 return fdt_get_phandle(initial_boot_params, node); 737 } 738 739 const char * __init of_flat_dt_get_machine_name(void) 740 { 741 const char *name; 742 unsigned long dt_root = of_get_flat_dt_root(); 743 744 name = of_get_flat_dt_prop(dt_root, "model", NULL); 745 if (!name) 746 name = of_get_flat_dt_prop(dt_root, "compatible", NULL); 747 return name; 748 } 749 750 /** 751 * of_flat_dt_match_machine - Iterate match tables to find matching machine. 752 * 753 * @default_match: A machine specific ptr to return in case of no match. 754 * @get_next_compat: callback function to return next compatible match table. 755 * 756 * Iterate through machine match tables to find the best match for the machine 757 * compatible string in the FDT. 758 */ 759 const void * __init of_flat_dt_match_machine(const void *default_match, 760 const void * (*get_next_compat)(const char * const**)) 761 { 762 const void *data = NULL; 763 const void *best_data = default_match; 764 const char *const *compat; 765 unsigned long dt_root; 766 unsigned int best_score = ~1, score = 0; 767 768 dt_root = of_get_flat_dt_root(); 769 while ((data = get_next_compat(&compat))) { 770 score = of_flat_dt_match(dt_root, compat); 771 if (score > 0 && score < best_score) { 772 best_data = data; 773 best_score = score; 774 } 775 } 776 if (!best_data) { 777 const char *prop; 778 int size; 779 780 pr_err("\n unrecognized device tree list:\n[ "); 781 782 prop = of_get_flat_dt_prop(dt_root, "compatible", &size); 783 if (prop) { 784 while (size > 0) { 785 printk("'%s' ", prop); 786 size -= strlen(prop) + 1; 787 prop += strlen(prop) + 1; 788 } 789 } 790 printk("]\n\n"); 791 return NULL; 792 } 793 794 pr_info("Machine model: %s\n", of_flat_dt_get_machine_name()); 795 796 return best_data; 797 } 798 799 static void __early_init_dt_declare_initrd(unsigned long start, 800 unsigned long end) 801 { 802 /* 803 * __va() is not yet available this early on some platforms. In that 804 * case, the platform uses phys_initrd_start/phys_initrd_size instead 805 * and does the VA conversion itself. 806 */ 807 if (!IS_ENABLED(CONFIG_ARM64) && 808 !(IS_ENABLED(CONFIG_RISCV) && IS_ENABLED(CONFIG_64BIT))) { 809 initrd_start = (unsigned long)__va(start); 810 initrd_end = (unsigned long)__va(end); 811 initrd_below_start_ok = 1; 812 } 813 } 814 815 /** 816 * early_init_dt_check_for_initrd - Decode initrd location from flat tree 817 * @node: reference to node containing initrd location ('chosen') 818 */ 819 static void __init early_init_dt_check_for_initrd(unsigned long node) 820 { 821 u64 start, end; 822 int len; 823 const __be32 *prop; 824 825 if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD)) 826 return; 827 828 pr_debug("Looking for initrd properties... "); 829 830 prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len); 831 if (!prop) 832 return; 833 start = of_read_number(prop, len/4); 834 835 prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len); 836 if (!prop) 837 return; 838 end = of_read_number(prop, len/4); 839 if (start > end) 840 return; 841 842 __early_init_dt_declare_initrd(start, end); 843 phys_initrd_start = start; 844 phys_initrd_size = end - start; 845 846 pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n", start, end); 847 } 848 849 /** 850 * early_init_dt_check_for_elfcorehdr - Decode elfcorehdr location from flat 851 * tree 852 * @node: reference to node containing elfcorehdr location ('chosen') 853 */ 854 static void __init early_init_dt_check_for_elfcorehdr(unsigned long node) 855 { 856 if (!IS_ENABLED(CONFIG_CRASH_DUMP)) 857 return; 858 859 pr_debug("Looking for elfcorehdr property... "); 860 861 if (!of_flat_dt_get_addr_size(node, "linux,elfcorehdr", 862 &elfcorehdr_addr, &elfcorehdr_size)) 863 return; 864 865 pr_debug("elfcorehdr_start=0x%llx elfcorehdr_size=0x%llx\n", 866 elfcorehdr_addr, elfcorehdr_size); 867 } 868 869 static unsigned long chosen_node_offset = -FDT_ERR_NOTFOUND; 870 871 /* 872 * The main usage of linux,usable-memory-range is for crash dump kernel. 873 * Originally, the number of usable-memory regions is one. Now there may 874 * be two regions, low region and high region. 875 * To make compatibility with existing user-space and older kdump, the low 876 * region is always the last range of linux,usable-memory-range if exist. 877 */ 878 #define MAX_USABLE_RANGES 2 879 880 /** 881 * early_init_dt_check_for_usable_mem_range - Decode usable memory range 882 * location from flat tree 883 */ 884 void __init early_init_dt_check_for_usable_mem_range(void) 885 { 886 struct memblock_region rgn[MAX_USABLE_RANGES] = {0}; 887 const __be32 *prop; 888 int len, i; 889 u64 base, size; 890 unsigned long node = chosen_node_offset; 891 892 if ((long)node < 0) 893 return; 894 895 pr_debug("Looking for usable-memory-range property... "); 896 897 prop = of_flat_dt_get_addr_size_prop(node, "linux,usable-memory-range", 898 &len); 899 if (!prop) 900 return; 901 902 len = min(len, MAX_USABLE_RANGES); 903 904 for (i = 0; i < len; i++) { 905 of_flat_dt_read_addr_size(prop, i, &base, &size); 906 rgn[i].base = base; 907 rgn[i].size = size; 908 909 pr_debug("cap_mem_regions[%d]: base=%pa, size=%pa\n", 910 i, &rgn[i].base, &rgn[i].size); 911 } 912 913 memblock_cap_memory_range(rgn[0].base, rgn[0].size); 914 for (i = 1; i < MAX_USABLE_RANGES && rgn[i].size; i++) 915 memblock_add(rgn[i].base, rgn[i].size); 916 } 917 918 /** 919 * early_init_dt_check_kho - Decode info required for kexec handover from DT 920 */ 921 static void __init early_init_dt_check_kho(void) 922 { 923 unsigned long node = chosen_node_offset; 924 u64 fdt_start, fdt_size, scratch_start, scratch_size; 925 926 if (!IS_ENABLED(CONFIG_KEXEC_HANDOVER) || (long)node < 0) 927 return; 928 929 if (!of_flat_dt_get_addr_size(node, "linux,kho-fdt", 930 &fdt_start, &fdt_size)) 931 return; 932 933 if (!of_flat_dt_get_addr_size(node, "linux,kho-scratch", 934 &scratch_start, &scratch_size)) 935 return; 936 937 kho_populate(fdt_start, fdt_size, scratch_start, scratch_size); 938 } 939 940 #ifdef CONFIG_SERIAL_EARLYCON 941 942 int __init early_init_dt_scan_chosen_stdout(void) 943 { 944 int offset; 945 const char *p, *q, *options = NULL; 946 int l; 947 const struct earlycon_id *match; 948 const void *fdt = initial_boot_params; 949 int ret; 950 951 offset = fdt_path_offset(fdt, "/chosen"); 952 if (offset < 0) 953 offset = fdt_path_offset(fdt, "/chosen@0"); 954 if (offset < 0) 955 return -ENOENT; 956 957 p = fdt_getprop(fdt, offset, "stdout-path", &l); 958 if (!p) 959 p = fdt_getprop(fdt, offset, "linux,stdout-path", &l); 960 if (!p || !l) 961 return -ENOENT; 962 963 q = strchrnul(p, ':'); 964 if (*q != '\0') 965 options = q + 1; 966 l = q - p; 967 968 /* Get the node specified by stdout-path */ 969 offset = fdt_path_offset_namelen(fdt, p, l); 970 if (offset < 0) { 971 pr_warn("earlycon: stdout-path %.*s not found\n", l, p); 972 return 0; 973 } 974 975 for (match = __earlycon_table; match < __earlycon_table_end; match++) { 976 if (!match->compatible[0]) 977 continue; 978 979 if (fdt_node_check_compatible(fdt, offset, match->compatible)) 980 continue; 981 982 ret = of_setup_earlycon(match, offset, options); 983 if (!ret || ret == -EALREADY) 984 return 0; 985 } 986 return -ENODEV; 987 } 988 #endif 989 990 /* 991 * early_init_dt_scan_root - fetch the top level address and size cells 992 */ 993 int __init early_init_dt_scan_root(void) 994 { 995 const __be32 *prop; 996 const void *fdt = initial_boot_params; 997 int node = fdt_path_offset(fdt, "/"); 998 999 if (node < 0) 1000 return -ENODEV; 1001 1002 dt_root_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT; 1003 dt_root_addr_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT; 1004 1005 prop = of_get_flat_dt_prop(node, "#size-cells", NULL); 1006 if (!WARN(!prop, "No '#size-cells' in root node\n")) 1007 dt_root_size_cells = be32_to_cpup(prop); 1008 pr_debug("dt_root_size_cells = %x\n", dt_root_size_cells); 1009 1010 prop = of_get_flat_dt_prop(node, "#address-cells", NULL); 1011 if (!WARN(!prop, "No '#address-cells' in root node\n")) 1012 dt_root_addr_cells = be32_to_cpup(prop); 1013 pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells); 1014 1015 return 0; 1016 } 1017 1018 u64 __init dt_mem_next_cell(int s, const __be32 **cellp) 1019 { 1020 const __be32 *p = *cellp; 1021 1022 *cellp = p + s; 1023 return of_read_number(p, s); 1024 } 1025 1026 /* 1027 * early_init_dt_scan_memory - Look for and parse memory nodes 1028 */ 1029 int __init early_init_dt_scan_memory(void) 1030 { 1031 int node, found_memory = 0; 1032 const void *fdt = initial_boot_params; 1033 1034 fdt_for_each_subnode(node, fdt, 0) { 1035 const char *type = of_get_flat_dt_prop(node, "device_type", NULL); 1036 const __be32 *reg; 1037 int i, l; 1038 bool hotpluggable; 1039 1040 /* We are scanning "memory" nodes only */ 1041 if (type == NULL || strcmp(type, "memory") != 0) 1042 continue; 1043 1044 if (!of_fdt_device_is_available(fdt, node)) 1045 continue; 1046 1047 reg = of_flat_dt_get_addr_size_prop(node, "linux,usable-memory", &l); 1048 if (reg == NULL) 1049 reg = of_flat_dt_get_addr_size_prop(node, "reg", &l); 1050 if (reg == NULL) 1051 continue; 1052 1053 hotpluggable = of_get_flat_dt_prop(node, "hotpluggable", NULL); 1054 1055 pr_debug("memory scan node %s, reg {addr,size} entries %d,\n", 1056 fdt_get_name(fdt, node, NULL), l); 1057 1058 for (i = 0; i < l; i++) { 1059 u64 base, size; 1060 1061 of_flat_dt_read_addr_size(reg, i, &base, &size); 1062 1063 if (size == 0) 1064 continue; 1065 pr_debug(" - %llx, %llx\n", base, size); 1066 1067 early_init_dt_add_memory_arch(base, size); 1068 1069 found_memory = 1; 1070 1071 if (!hotpluggable) 1072 continue; 1073 1074 if (memblock_mark_hotplug(base, size)) 1075 pr_warn("failed to mark hotplug range 0x%llx - 0x%llx\n", 1076 base, base + size); 1077 } 1078 } 1079 return found_memory; 1080 } 1081 1082 int __init early_init_dt_scan_chosen(char *cmdline) 1083 { 1084 int l, node; 1085 const char *p; 1086 const void *rng_seed; 1087 const void *fdt = initial_boot_params; 1088 1089 node = fdt_path_offset(fdt, "/chosen"); 1090 if (node < 0) 1091 node = fdt_path_offset(fdt, "/chosen@0"); 1092 if (node < 0) 1093 /* Handle the cmdline config options even if no /chosen node */ 1094 goto handle_cmdline; 1095 1096 chosen_node_offset = node; 1097 1098 early_init_dt_check_for_initrd(node); 1099 early_init_dt_check_for_elfcorehdr(node); 1100 1101 rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l); 1102 if (rng_seed && l > 0) { 1103 add_bootloader_randomness(rng_seed, l); 1104 1105 /* try to clear seed so it won't be found. */ 1106 fdt_nop_property(initial_boot_params, node, "rng-seed"); 1107 1108 /* update CRC check value */ 1109 of_fdt_crc32 = crc32_be(~0, initial_boot_params, 1110 fdt_totalsize(initial_boot_params)); 1111 } 1112 1113 /* Retrieve command line */ 1114 p = of_get_flat_dt_prop(node, "bootargs", &l); 1115 if (p != NULL && l > 0) 1116 strscpy(cmdline, p, min(l, COMMAND_LINE_SIZE)); 1117 1118 handle_cmdline: 1119 /* 1120 * CONFIG_CMDLINE is meant to be a default in case nothing else 1121 * managed to set the command line, unless CONFIG_CMDLINE_FORCE 1122 * is set in which case we override whatever was found earlier. 1123 */ 1124 #ifdef CONFIG_CMDLINE 1125 #if defined(CONFIG_CMDLINE_EXTEND) 1126 strlcat(cmdline, " ", COMMAND_LINE_SIZE); 1127 strlcat(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE); 1128 #elif defined(CONFIG_CMDLINE_FORCE) 1129 strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE); 1130 #else 1131 /* No arguments from boot loader, use kernel's cmdl*/ 1132 if (!((char *)cmdline)[0]) 1133 strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE); 1134 #endif 1135 #endif /* CONFIG_CMDLINE */ 1136 1137 pr_debug("Command line is: %s\n", (char *)cmdline); 1138 1139 return 0; 1140 } 1141 1142 #ifndef MIN_MEMBLOCK_ADDR 1143 #define MIN_MEMBLOCK_ADDR __pa(PAGE_OFFSET) 1144 #endif 1145 #ifndef MAX_MEMBLOCK_ADDR 1146 #define MAX_MEMBLOCK_ADDR ((phys_addr_t)~0) 1147 #endif 1148 1149 void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size) 1150 { 1151 const u64 phys_offset = MIN_MEMBLOCK_ADDR; 1152 1153 if (size < PAGE_SIZE - (base & ~PAGE_MASK)) { 1154 pr_warn("Ignoring memory block 0x%llx - 0x%llx\n", 1155 base, base + size); 1156 return; 1157 } 1158 1159 if (!PAGE_ALIGNED(base)) { 1160 size -= PAGE_SIZE - (base & ~PAGE_MASK); 1161 base = PAGE_ALIGN(base); 1162 } 1163 size &= PAGE_MASK; 1164 1165 if (base > MAX_MEMBLOCK_ADDR) { 1166 pr_warn("Ignoring memory block 0x%llx - 0x%llx\n", 1167 base, base + size); 1168 return; 1169 } 1170 1171 if (base + size - 1 > MAX_MEMBLOCK_ADDR) { 1172 pr_warn("Ignoring memory range 0x%llx - 0x%llx\n", 1173 ((u64)MAX_MEMBLOCK_ADDR) + 1, base + size); 1174 size = MAX_MEMBLOCK_ADDR - base + 1; 1175 } 1176 1177 if (base + size < phys_offset) { 1178 pr_warn("Ignoring memory block 0x%llx - 0x%llx\n", 1179 base, base + size); 1180 return; 1181 } 1182 if (base < phys_offset) { 1183 pr_warn("Ignoring memory range 0x%llx - 0x%llx\n", 1184 base, phys_offset); 1185 size -= phys_offset - base; 1186 base = phys_offset; 1187 } 1188 memblock_add(base, size); 1189 } 1190 1191 static void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) 1192 { 1193 return memblock_alloc_or_panic(size, align); 1194 } 1195 1196 bool __init early_init_dt_verify(void *dt_virt, phys_addr_t dt_phys) 1197 { 1198 if (!dt_virt) 1199 return false; 1200 1201 /* check device tree validity */ 1202 if (fdt_check_header(dt_virt)) 1203 return false; 1204 1205 /* Setup flat device-tree pointer */ 1206 initial_boot_params = dt_virt; 1207 initial_boot_params_pa = dt_phys; 1208 of_fdt_crc32 = crc32_be(~0, initial_boot_params, 1209 fdt_totalsize(initial_boot_params)); 1210 1211 /* Initialize {size,address}-cells info */ 1212 early_init_dt_scan_root(); 1213 1214 return true; 1215 } 1216 1217 1218 void __init early_init_dt_scan_nodes(void) 1219 { 1220 int rc; 1221 1222 /* Retrieve various information from the /chosen node */ 1223 rc = early_init_dt_scan_chosen(boot_command_line); 1224 if (rc) 1225 pr_warn("No chosen node found, continuing without\n"); 1226 1227 /* Setup memory, calling early_init_dt_add_memory_arch */ 1228 early_init_dt_scan_memory(); 1229 1230 /* Handle linux,usable-memory-range property */ 1231 early_init_dt_check_for_usable_mem_range(); 1232 1233 /* Handle kexec handover */ 1234 early_init_dt_check_kho(); 1235 } 1236 1237 bool __init early_init_dt_scan(void *dt_virt, phys_addr_t dt_phys) 1238 { 1239 bool status; 1240 1241 status = early_init_dt_verify(dt_virt, dt_phys); 1242 if (!status) 1243 return false; 1244 1245 early_init_dt_scan_nodes(); 1246 return true; 1247 } 1248 1249 static void *__init copy_device_tree(void *fdt) 1250 { 1251 int size; 1252 void *dt; 1253 1254 size = fdt_totalsize(fdt); 1255 dt = early_init_dt_alloc_memory_arch(size, 1256 roundup_pow_of_two(FDT_V17_SIZE)); 1257 1258 if (dt) 1259 memcpy(dt, fdt, size); 1260 1261 return dt; 1262 } 1263 1264 /** 1265 * unflatten_device_tree - create tree of device_nodes from flat blob 1266 * 1267 * unflattens the device-tree passed by the firmware, creating the 1268 * tree of struct device_node. It also fills the "name" and "type" 1269 * pointers of the nodes so the normal device-tree walking functions 1270 * can be used. 1271 */ 1272 void __init unflatten_device_tree(void) 1273 { 1274 void *fdt = initial_boot_params; 1275 1276 /* Save the statically-placed regions in the reserved_mem array */ 1277 fdt_scan_reserved_mem_reg_nodes(); 1278 1279 /* Populate an empty root node when bootloader doesn't provide one */ 1280 if (!fdt) { 1281 fdt = (void *) __dtb_empty_root_begin; 1282 /* fdt_totalsize() will be used for copy size */ 1283 if (fdt_totalsize(fdt) > 1284 __dtb_empty_root_end - __dtb_empty_root_begin) { 1285 pr_err("invalid size in dtb_empty_root\n"); 1286 return; 1287 } 1288 of_fdt_crc32 = crc32_be(~0, fdt, fdt_totalsize(fdt)); 1289 fdt = copy_device_tree(fdt); 1290 } 1291 1292 __unflatten_device_tree(fdt, NULL, &of_root, 1293 early_init_dt_alloc_memory_arch, false); 1294 1295 /* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */ 1296 of_alias_scan(early_init_dt_alloc_memory_arch); 1297 1298 unittest_unflatten_overlay_base(); 1299 } 1300 1301 /** 1302 * unflatten_and_copy_device_tree - copy and create tree of device_nodes from flat blob 1303 * 1304 * Copies and unflattens the device-tree passed by the firmware, creating the 1305 * tree of struct device_node. It also fills the "name" and "type" 1306 * pointers of the nodes so the normal device-tree walking functions 1307 * can be used. This should only be used when the FDT memory has not been 1308 * reserved such is the case when the FDT is built-in to the kernel init 1309 * section. If the FDT memory is reserved already then unflatten_device_tree 1310 * should be used instead. 1311 */ 1312 void __init unflatten_and_copy_device_tree(void) 1313 { 1314 if (initial_boot_params) 1315 initial_boot_params = copy_device_tree(initial_boot_params); 1316 1317 unflatten_device_tree(); 1318 } 1319 1320 #ifdef CONFIG_SYSFS 1321 static int __init of_fdt_raw_init(void) 1322 { 1323 static __ro_after_init BIN_ATTR_SIMPLE_ADMIN_RO(fdt); 1324 1325 if (!initial_boot_params) 1326 return 0; 1327 1328 if (of_fdt_crc32 != crc32_be(~0, initial_boot_params, 1329 fdt_totalsize(initial_boot_params))) { 1330 pr_warn("not creating '/sys/firmware/fdt': CRC check failed\n"); 1331 return 0; 1332 } 1333 bin_attr_fdt.private = initial_boot_params; 1334 bin_attr_fdt.size = fdt_totalsize(initial_boot_params); 1335 return sysfs_create_bin_file(firmware_kobj, &bin_attr_fdt); 1336 } 1337 late_initcall(of_fdt_raw_init); 1338 #endif 1339 1340 #endif /* CONFIG_OF_EARLY_FLATTREE */ 1341