1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Functions for working with the Flattened Device Tree data format
4 *
5 * Copyright 2009 Benjamin Herrenschmidt, IBM Corp
6 * benh@kernel.crashing.org
7 */
8
9 #define pr_fmt(fmt) "OF: fdt: " fmt
10
11 #include <linux/acpi.h>
12 #include <linux/crash_dump.h>
13 #include <linux/crc32.h>
14 #include <linux/kernel.h>
15 #include <linux/initrd.h>
16 #include <linux/memblock.h>
17 #include <linux/mutex.h>
18 #include <linux/of.h>
19 #include <linux/of_fdt.h>
20 #include <linux/sizes.h>
21 #include <linux/string.h>
22 #include <linux/errno.h>
23 #include <linux/slab.h>
24 #include <linux/libfdt.h>
25 #include <linux/debugfs.h>
26 #include <linux/serial_core.h>
27 #include <linux/sysfs.h>
28 #include <linux/random.h>
29
30 #include <asm/setup.h> /* for COMMAND_LINE_SIZE */
31 #include <asm/page.h>
32
33 #include "of_private.h"
34
35 /*
36 * __dtb_empty_root_begin[] and __dtb_empty_root_end[] magically created by
37 * cmd_wrap_S_dtb in scripts/Makefile.dtbs
38 */
39 extern uint8_t __dtb_empty_root_begin[];
40 extern uint8_t __dtb_empty_root_end[];
41
/*
 * of_fdt_limit_memory - limit the number of regions in the /memory node
 * @limit: maximum entries
 *
 * Adjust the flattened device tree to have at most @limit memory entries
 * in the /memory node. This function may be called any time after
 * initial_boot_params is set.
 */
void __init of_fdt_limit_memory(int limit)
51 {
52 int memory;
53 int len;
54 const void *val;
55 int cell_size = sizeof(uint32_t)*(dt_root_addr_cells + dt_root_size_cells);
56
57 memory = fdt_path_offset(initial_boot_params, "/memory");
58 if (memory > 0) {
59 val = fdt_getprop(initial_boot_params, memory, "reg", &len);
60 if (len > limit*cell_size) {
61 len = limit*cell_size;
62 pr_debug("Limiting number of entries to %d\n", limit);
63 fdt_setprop(initial_boot_params, memory, "reg", val,
64 len);
65 }
66 }
67 }
68
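/*
 * of_fdt_device_is_available - check whether an FDT node is available
 *
 * A device is considered available if it has no "status" property, or if
 * "status" is set to "ok" or "okay".
 */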
bool of_fdt_device_is_available(const void *blob, unsigned long node)
70 {
71 const char *status = fdt_getprop(blob, node, "status", NULL);
72
73 if (!status)
74 return true;
75
76 if (!strcmp(status, "ok") || !strcmp(status, "okay"))
77 return true;
78
79 return false;
80 }
81
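/*
 * Bump allocator used while unflattening: align the cursor at *mem, carve
 * off @size bytes and advance the cursor past them.
 */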
static void *unflatten_dt_alloc(void **mem, unsigned long size,
				unsigned long align)
84 {
85 void *res;
86
87 *mem = PTR_ALIGN(*mem, align);
88 res = *mem;
89 *mem += size;
90
91 return res;
92 }
93
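/*
 * Walk all properties of the FDT node at @offset and append struct property
 * entries to @np. If @dryrun is set, only account for the memory that would
 * be needed. A "name" property is synthesized from the unit name when the
 * blob does not provide one.
 */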
static void populate_properties(const void *blob,
				int offset,
				void **mem,
				struct device_node *np,
				const char *nodename,
				bool dryrun)
100 {
101 struct property *pp, **pprev = NULL;
102 int cur;
103 bool has_name = false;
104
105 pprev = &np->properties;
106 for (cur = fdt_first_property_offset(blob, offset);
107 cur >= 0;
108 cur = fdt_next_property_offset(blob, cur)) {
109 const __be32 *val;
110 const char *pname;
111 u32 sz;
112
113 val = fdt_getprop_by_offset(blob, cur, &pname, &sz);
114 if (!val) {
115 pr_warn("Cannot locate property at 0x%x\n", cur);
116 continue;
117 }
118
119 if (!pname) {
120 pr_warn("Cannot find property name at 0x%x\n", cur);
121 continue;
122 }
123
124 if (!strcmp(pname, "name"))
125 has_name = true;
126
127 pp = unflatten_dt_alloc(mem, sizeof(struct property),
128 __alignof__(struct property));
129 if (dryrun)
130 continue;
131
132 /* We accept flattened tree phandles either in
133 * ePAPR-style "phandle" properties, or the
134 * legacy "linux,phandle" properties. If both
135 * appear and have different values, things
136 * will get weird. Don't do that.
137 */
138 if (!strcmp(pname, "phandle") ||
139 !strcmp(pname, "linux,phandle")) {
140 if (!np->phandle)
141 np->phandle = be32_to_cpup(val);
142 }
143
144 /* And we process the "ibm,phandle" property
145 * used in pSeries dynamic device tree
146 * stuff
147 */
148 if (!strcmp(pname, "ibm,phandle"))
149 np->phandle = be32_to_cpup(val);
150
151 pp->name = (char *)pname;
152 pp->length = sz;
153 pp->value = (__be32 *)val;
154 *pprev = pp;
155 pprev = &pp->next;
156 }
157
158 /* With version 0x10 we may not have the name property,
159 * recreate it here from the unit name if absent
160 */
161 if (!has_name) {
162 const char *p = nodename, *ps = p, *pa = NULL;
163 int len;
164
165 while (*p) {
166 if ((*p) == '@')
167 pa = p;
168 else if ((*p) == '/')
169 ps = p + 1;
170 p++;
171 }
172
173 if (pa < ps)
174 pa = p;
175 len = (pa - ps) + 1;
176 pp = unflatten_dt_alloc(mem, sizeof(struct property) + len,
177 __alignof__(struct property));
178 if (!dryrun) {
179 pp->name = "name";
180 pp->length = len;
181 pp->value = pp + 1;
182 *pprev = pp;
183 memcpy(pp->value, ps, len - 1);
184 ((char *)pp->value)[len - 1] = 0;
185 pr_debug("fixed up name for %s -> %s\n",
186 nodename, (char *)pp->value);
187 }
188 }
189 }
190
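/*
 * Allocate a struct device_node for the FDT node at @offset, populate its
 * properties and link it under @dad. The new node is returned via @pnp.
 * Returns 0 on success, or the error reported by fdt_get_name() if the node
 * name cannot be read.
 */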
static int populate_node(const void *blob,
			 int offset,
			 void **mem,
			 struct device_node *dad,
			 struct device_node **pnp,
			 bool dryrun)
197 {
198 struct device_node *np;
199 const char *pathp;
200 int len;
201
202 pathp = fdt_get_name(blob, offset, &len);
203 if (!pathp) {
204 *pnp = NULL;
205 return len;
206 }
207
208 len++;
209
210 np = unflatten_dt_alloc(mem, sizeof(struct device_node) + len,
211 __alignof__(struct device_node));
212 if (!dryrun) {
213 char *fn;
214 of_node_init(np);
215 np->full_name = fn = ((char *)np) + sizeof(*np);
216
217 memcpy(fn, pathp, len);
218
219 if (dad != NULL) {
220 np->parent = dad;
221 np->sibling = dad->child;
222 dad->child = np;
223 }
224 }
225
226 populate_properties(blob, offset, mem, np, pathp, dryrun);
227 if (!dryrun) {
228 np->name = of_get_property(np, "name", NULL);
229 if (!np->name)
230 np->name = "<NULL>";
231 }
232
233 *pnp = np;
234 return 0;
235 }
236
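/*
 * The unflattening code links each new child at the head of its parent's
 * list, so children end up in reverse order. Walk the tree depth-first and
 * reverse every child list to restore the original order.
 */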
static void reverse_nodes(struct device_node *parent)
238 {
239 struct device_node *child, *next;
240
/* Depth-first: recurse into the children first */
242 child = parent->child;
243 while (child) {
244 reverse_nodes(child);
245
246 child = child->sibling;
247 }
248
249 /* Reverse the nodes in the child list */
250 child = parent->child;
251 parent->child = NULL;
252 while (child) {
253 next = child->sibling;
254
255 child->sibling = parent->child;
256 parent->child = child;
257 child = next;
258 }
259 }
260
/**
 * unflatten_dt_nodes - Alloc and populate a device_node from the flat tree
 * @blob: The parent device tree blob
 * @mem: Memory chunk to use for allocating device nodes and properties
 * @dad: Parent struct device_node
 * @nodepp: The device_node tree created by the call
 *
 * Return: The size of the unflattened device tree or a negative error code
 */
static int unflatten_dt_nodes(const void *blob,
			      void *mem,
			      struct device_node *dad,
			      struct device_node **nodepp)
274 {
275 struct device_node *root;
276 int offset = 0, depth = 0, initial_depth = 0;
277 #define FDT_MAX_DEPTH 64
278 struct device_node *nps[FDT_MAX_DEPTH];
279 void *base = mem;
280 bool dryrun = !base;
281 int ret;
282
283 if (nodepp)
284 *nodepp = NULL;
285
/*
 * We're unflattening a device sub-tree if @dad is valid. There may be
 * multiple nodes at the first level of depth. We need to set @depth to 1
 * to make fdt_next_node() happy, as it bails out immediately when a
 * negative @depth is found. Otherwise, the device nodes other than the
 * first one won't be unflattened successfully.
 */
293 if (dad)
294 depth = initial_depth = 1;
295
296 root = dad;
297 nps[depth] = dad;
298
299 for (offset = 0;
300 offset >= 0 && depth >= initial_depth;
301 offset = fdt_next_node(blob, offset, &depth)) {
302 if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH - 1))
303 continue;
304
305 if (!IS_ENABLED(CONFIG_OF_KOBJ) &&
306 !of_fdt_device_is_available(blob, offset))
307 continue;
308
309 ret = populate_node(blob, offset, &mem, nps[depth],
310 &nps[depth+1], dryrun);
311 if (ret < 0)
312 return ret;
313
314 if (!dryrun && nodepp && !*nodepp)
315 *nodepp = nps[depth+1];
316 if (!dryrun && !root)
317 root = nps[depth+1];
318 }
319
320 if (offset < 0 && offset != -FDT_ERR_NOTFOUND) {
321 pr_err("Error %d processing FDT\n", offset);
322 return -EINVAL;
323 }
324
/*
 * Reverse the child list. Some drivers assume that the node order
 * matches the .dts node order.
 */
329 if (!dryrun)
330 reverse_nodes(root);
331
332 return mem - base;
333 }
334
335 /**
336 * __unflatten_device_tree - create tree of device_nodes from flat blob
337 * @blob: The blob to expand
338 * @dad: Parent device node
339 * @mynodes: The device_node tree created by the call
340 * @dt_alloc: An allocator that provides a virtual address to memory
341 * for the resulting tree
342 * @detached: if true set OF_DETACHED on @mynodes
343 *
344 * unflattens a device-tree, creating the tree of struct device_node. It also
345 * fills the "name" and "type" pointers of the nodes so the normal device-tree
346 * walking functions can be used.
347 *
348 * Return: NULL on failure or the memory chunk containing the unflattened
349 * device tree on success.
350 */
void *__unflatten_device_tree(const void *blob,
			      struct device_node *dad,
			      struct device_node **mynodes,
			      void *(*dt_alloc)(u64 size, u64 align),
			      bool detached)
356 {
357 int size;
358 void *mem;
359 int ret;
360
361 if (mynodes)
362 *mynodes = NULL;
363
364 pr_debug(" -> unflatten_device_tree()\n");
365
366 if (!blob) {
367 pr_debug("No device tree pointer\n");
368 return NULL;
369 }
370
371 pr_debug("Unflattening device tree:\n");
372 pr_debug("magic: %08x\n", fdt_magic(blob));
373 pr_debug("size: %08x\n", fdt_totalsize(blob));
374 pr_debug("version: %08x\n", fdt_version(blob));
375
376 if (fdt_check_header(blob)) {
377 pr_err("Invalid device tree blob header\n");
378 return NULL;
379 }
380
381 /* First pass, scan for size */
382 size = unflatten_dt_nodes(blob, NULL, dad, NULL);
383 if (size <= 0)
384 return NULL;
385
386 size = ALIGN(size, 4);
387 pr_debug(" size is %d, allocating...\n", size);
388
389 /* Allocate memory for the expanded device tree */
390 mem = dt_alloc(size + 4, __alignof__(struct device_node));
391 if (!mem)
392 return NULL;
393
394 memset(mem, 0, size);
395
396 *(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef);
397
398 pr_debug(" unflattening %p...\n", mem);
399
400 /* Second pass, do actual unflattening */
401 ret = unflatten_dt_nodes(blob, mem, dad, mynodes);
402
403 if (be32_to_cpup(mem + size) != 0xdeadbeef)
404 pr_warn("End of tree marker overwritten: %08x\n",
405 be32_to_cpup(mem + size));
406
407 if (ret <= 0)
408 return NULL;
409
410 if (detached && mynodes && *mynodes) {
411 of_node_set_flag(*mynodes, OF_DETACHED);
412 pr_debug("unflattened tree is detached\n");
413 }
414
415 pr_debug(" <- unflatten_device_tree()\n");
416 return mem;
417 }
418
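/* Allocator callback used by of_fdt_unflatten_tree(); @align is unused. */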
static void *kernel_tree_alloc(u64 size, u64 align)
420 {
421 return kzalloc(size, GFP_KERNEL);
422 }
423
424 static DEFINE_MUTEX(of_fdt_unflatten_mutex);
425
426 /**
427 * of_fdt_unflatten_tree - create tree of device_nodes from flat blob
428 * @blob: Flat device tree blob
429 * @dad: Parent device node
430 * @mynodes: The device tree created by the call
431 *
432 * unflattens the device-tree passed by the firmware, creating the
433 * tree of struct device_node. It also fills the "name" and "type"
434 * pointers of the nodes so the normal device-tree walking functions
435 * can be used.
436 *
437 * Return: NULL on failure or the memory chunk containing the unflattened
438 * device tree on success.
439 */
void *of_fdt_unflatten_tree(const unsigned long *blob,
			    struct device_node *dad,
			    struct device_node **mynodes)
443 {
444 void *mem;
445
446 mutex_lock(&of_fdt_unflatten_mutex);
447 mem = __unflatten_device_tree(blob, dad, mynodes, &kernel_tree_alloc,
448 true);
449 mutex_unlock(&of_fdt_unflatten_mutex);
450
451 return mem;
452 }
453 EXPORT_SYMBOL_GPL(of_fdt_unflatten_tree);
454
455 /* Everything below here references initial_boot_params directly. */
456 int __initdata dt_root_addr_cells;
457 int __initdata dt_root_size_cells;
458
459 void *initial_boot_params __ro_after_init;
460 phys_addr_t initial_boot_params_pa __ro_after_init;
461
462 #ifdef CONFIG_OF_EARLY_FLATTREE
463
464 static u32 of_fdt_crc32;
465
/*
 * fdt_reserve_elfcorehdr() - reserves memory for elf core header
 *
 * This function reserves the memory occupied by an elf core header
 * described in the device tree. This region contains all the
 * information about the primary kernel's core image and is used by a
 * dump capture kernel to access the system memory of the primary kernel.
 */
static void __init fdt_reserve_elfcorehdr(void)
475 {
476 if (!IS_ENABLED(CONFIG_CRASH_DUMP) || !elfcorehdr_size)
477 return;
478
479 if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
480 pr_warn("elfcorehdr is overlapped\n");
481 return;
482 }
483
484 memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
485
486 pr_info("Reserving %llu KiB of memory at 0x%llx for elfcorehdr\n",
487 elfcorehdr_size >> 10, elfcorehdr_addr);
488 }
489
/**
 * early_init_fdt_scan_reserved_mem() - create reserved memory regions
 *
 * This function grabs memory from the early allocator for exclusive use by
 * devices, as defined in the device tree. It should be called by arch-specific
 * code once the early allocator (i.e. memblock) has been fully activated.
 */
void __init early_init_fdt_scan_reserved_mem(void)
498 {
499 int n;
500 u64 base, size;
501
502 if (!initial_boot_params)
503 return;
504
505 fdt_scan_reserved_mem();
506 fdt_reserve_elfcorehdr();
507
508 /* Process header /memreserve/ fields */
509 for (n = 0; ; n++) {
510 fdt_get_mem_rsv(initial_boot_params, n, &base, &size);
511 if (!size)
512 break;
513 memblock_reserve(base, size);
514 }
515 }
516
517 /**
518 * early_init_fdt_reserve_self() - reserve the memory used by the FDT blob
519 */
void __init early_init_fdt_reserve_self(void)
521 {
522 if (!initial_boot_params)
523 return;
524
525 /* Reserve the dtb region */
526 memblock_reserve(__pa(initial_boot_params),
527 fdt_totalsize(initial_boot_params));
528 }
529
/**
 * of_scan_flat_dt - scan flattened tree blob and call callback on each.
 * @it: callback function
 * @data: context data pointer
 *
 * This function is used to scan the flattened device tree; it is typically
 * used to extract the memory information at boot, before the tree can be
 * unflattened.
 */
int __init of_scan_flat_dt(int (*it)(unsigned long node,
				     const char *uname, int depth,
				     void *data),
			   void *data)
543 {
544 const void *blob = initial_boot_params;
545 const char *pathp;
546 int offset, rc = 0, depth = -1;
547
548 if (!blob)
549 return 0;
550
551 for (offset = fdt_next_node(blob, -1, &depth);
552 offset >= 0 && depth >= 0 && !rc;
553 offset = fdt_next_node(blob, offset, &depth)) {
554
555 pathp = fdt_get_name(blob, offset, NULL);
556 rc = it(offset, pathp, depth, data);
557 }
558 return rc;
559 }
560
/**
 * of_scan_flat_dt_subnodes - scan sub-nodes of a node and call callback on each.
 * @parent: parent node
 * @it: callback function
 * @data: context data pointer
 *
 * This function is used to scan the sub-nodes of a node.
 */
int __init of_scan_flat_dt_subnodes(unsigned long parent,
				    int (*it)(unsigned long node,
					      const char *uname,
					      void *data),
				    void *data)
574 {
575 const void *blob = initial_boot_params;
576 int node;
577
578 fdt_for_each_subnode(node, blob, parent) {
579 const char *pathp;
580 int rc;
581
582 pathp = fdt_get_name(blob, node, NULL);
583 rc = it(node, pathp, data);
584 if (rc)
585 return rc;
586 }
587 return 0;
588 }
589
/**
 * of_get_flat_dt_subnode_by_name - get a subnode by its name
 *
 * @node: the parent node
 * @uname: the name of the subnode
 * @return offset of the subnode, or -FDT_ERR_NOTFOUND if there is none
 */

int __init of_get_flat_dt_subnode_by_name(unsigned long node, const char *uname)
599 {
600 return fdt_subnode_offset(initial_boot_params, node, uname);
601 }
602
603 /*
604 * of_get_flat_dt_root - find the root node in the flat blob
605 */
unsigned long __init of_get_flat_dt_root(void)
607 {
608 return 0;
609 }
610
611 /*
612 * of_get_flat_dt_prop - Given a node in the flat blob, return the property ptr
613 *
614 * This function can be used within scan_flattened_dt callback to get
615 * access to properties
616 */
const void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
				       int *size)
619 {
620 return fdt_getprop(initial_boot_params, node, name, size);
621 }
622
623 /**
624 * of_fdt_is_compatible - Return true if given node from the given blob has
625 * compat in its compatible list
626 * @blob: A device tree blob
627 * @node: node to test
628 * @compat: compatible string to compare with compatible list.
629 *
630 * Return: a non-zero value on match with smaller values returned for more
631 * specific compatible values.
632 */
static int of_fdt_is_compatible(const void *blob,
				unsigned long node, const char *compat)
635 {
636 const char *cp;
637 int cplen;
638 unsigned long l, score = 0;
639
640 cp = fdt_getprop(blob, node, "compatible", &cplen);
641 if (cp == NULL)
642 return 0;
643 while (cplen > 0) {
644 score++;
645 if (of_compat_cmp(cp, compat, strlen(compat)) == 0)
646 return score;
647 l = strlen(cp) + 1;
648 cp += l;
649 cplen -= l;
650 }
651
652 return 0;
653 }
654
655 /**
656 * of_flat_dt_is_compatible - Return true if given node has compat in compatible list
657 * @node: node to test
658 * @compat: compatible string to compare with compatible list.
659 */
int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
661 {
662 return of_fdt_is_compatible(initial_boot_params, node, compat);
663 }
664
665 /*
666 * of_flat_dt_match - Return true if node matches a list of compatible values
667 */
static int __init of_flat_dt_match(unsigned long node, const char *const *compat)
669 {
670 unsigned int tmp, score = 0;
671
672 if (!compat)
673 return 0;
674
675 while (*compat) {
676 tmp = of_fdt_is_compatible(initial_boot_params, node, *compat);
677 if (tmp && (score == 0 || (tmp < score)))
678 score = tmp;
679 compat++;
680 }
681
682 return score;
683 }
684
685 /*
686 * of_get_flat_dt_phandle - Given a node in the flat blob, return the phandle
687 */
uint32_t __init of_get_flat_dt_phandle(unsigned long node)
689 {
690 return fdt_get_phandle(initial_boot_params, node);
691 }
692
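/*
 * of_flat_dt_get_machine_name - return the machine name from the flat tree,
 * taken from the root node's "model" property or, failing that, its
 * "compatible" property.
 */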
const char * __init of_flat_dt_get_machine_name(void)
694 {
695 const char *name;
696 unsigned long dt_root = of_get_flat_dt_root();
697
698 name = of_get_flat_dt_prop(dt_root, "model", NULL);
699 if (!name)
700 name = of_get_flat_dt_prop(dt_root, "compatible", NULL);
701 return name;
702 }
703
704 /**
705 * of_flat_dt_match_machine - Iterate match tables to find matching machine.
706 *
707 * @default_match: A machine specific ptr to return in case of no match.
708 * @get_next_compat: callback function to return next compatible match table.
709 *
710 * Iterate through machine match tables to find the best match for the machine
711 * compatible string in the FDT.
712 */
const void * __init of_flat_dt_match_machine(const void *default_match,
		const void * (*get_next_compat)(const char * const**))
715 {
716 const void *data = NULL;
717 const void *best_data = default_match;
718 const char *const *compat;
719 unsigned long dt_root;
720 unsigned int best_score = ~1, score = 0;
721
722 dt_root = of_get_flat_dt_root();
723 while ((data = get_next_compat(&compat))) {
724 score = of_flat_dt_match(dt_root, compat);
725 if (score > 0 && score < best_score) {
726 best_data = data;
727 best_score = score;
728 }
729 }
730 if (!best_data) {
731 const char *prop;
732 int size;
733
734 pr_err("\n unrecognized device tree list:\n[ ");
735
736 prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
737 if (prop) {
738 while (size > 0) {
739 printk("'%s' ", prop);
740 size -= strlen(prop) + 1;
741 prop += strlen(prop) + 1;
742 }
743 }
744 printk("]\n\n");
745 return NULL;
746 }
747
748 pr_info("Machine model: %s\n", of_flat_dt_get_machine_name());
749
750 return best_data;
751 }
752
static void __early_init_dt_declare_initrd(unsigned long start,
					   unsigned long end)
755 {
756 /*
757 * __va() is not yet available this early on some platforms. In that
758 * case, the platform uses phys_initrd_start/phys_initrd_size instead
759 * and does the VA conversion itself.
760 */
761 if (!IS_ENABLED(CONFIG_ARM64) &&
762 !(IS_ENABLED(CONFIG_RISCV) && IS_ENABLED(CONFIG_64BIT))) {
763 initrd_start = (unsigned long)__va(start);
764 initrd_end = (unsigned long)__va(end);
765 initrd_below_start_ok = 1;
766 }
767 }
768
769 /**
770 * early_init_dt_check_for_initrd - Decode initrd location from flat tree
771 * @node: reference to node containing initrd location ('chosen')
772 */
static void __init early_init_dt_check_for_initrd(unsigned long node)
774 {
775 u64 start, end;
776 int len;
777 const __be32 *prop;
778
779 if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
780 return;
781
782 pr_debug("Looking for initrd properties... ");
783
784 prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len);
785 if (!prop)
786 return;
787 start = of_read_number(prop, len/4);
788
789 prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len);
790 if (!prop)
791 return;
792 end = of_read_number(prop, len/4);
793 if (start > end)
794 return;
795
796 __early_init_dt_declare_initrd(start, end);
797 phys_initrd_start = start;
798 phys_initrd_size = end - start;
799
800 pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n", start, end);
801 }
802
803 /**
804 * early_init_dt_check_for_elfcorehdr - Decode elfcorehdr location from flat
805 * tree
806 * @node: reference to node containing elfcorehdr location ('chosen')
807 */
static void __init early_init_dt_check_for_elfcorehdr(unsigned long node)
809 {
810 const __be32 *prop;
811 int len;
812
813 if (!IS_ENABLED(CONFIG_CRASH_DUMP))
814 return;
815
816 pr_debug("Looking for elfcorehdr property... ");
817
818 prop = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
819 if (!prop || (len < (dt_root_addr_cells + dt_root_size_cells)))
820 return;
821
822 elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &prop);
823 elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &prop);
824
825 pr_debug("elfcorehdr_start=0x%llx elfcorehdr_size=0x%llx\n",
826 elfcorehdr_addr, elfcorehdr_size);
827 }
828
829 static unsigned long chosen_node_offset = -FDT_ERR_NOTFOUND;
830
/*
 * The main usage of linux,usable-memory-range is for the crash dump kernel.
 * Originally there was only one usable-memory region; now there may be two,
 * a low region and a high region. To keep compatibility with existing
 * user-space and older kdump tools, the low region is always the last range
 * of linux,usable-memory-range, if it exists.
 */
838 #define MAX_USABLE_RANGES 2
839
840 /**
841 * early_init_dt_check_for_usable_mem_range - Decode usable memory range
842 * location from flat tree
843 */
void __init early_init_dt_check_for_usable_mem_range(void)
845 {
846 struct memblock_region rgn[MAX_USABLE_RANGES] = {0};
847 const __be32 *prop, *endp;
848 int len, i;
849 unsigned long node = chosen_node_offset;
850
851 if ((long)node < 0)
852 return;
853
854 pr_debug("Looking for usable-memory-range property... ");
855
856 prop = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
857 if (!prop || (len % (dt_root_addr_cells + dt_root_size_cells)))
858 return;
859
860 endp = prop + (len / sizeof(__be32));
861 for (i = 0; i < MAX_USABLE_RANGES && prop < endp; i++) {
862 rgn[i].base = dt_mem_next_cell(dt_root_addr_cells, &prop);
863 rgn[i].size = dt_mem_next_cell(dt_root_size_cells, &prop);
864
865 pr_debug("cap_mem_regions[%d]: base=%pa, size=%pa\n",
866 i, &rgn[i].base, &rgn[i].size);
867 }
868
869 memblock_cap_memory_range(rgn[0].base, rgn[0].size);
870 for (i = 1; i < MAX_USABLE_RANGES && rgn[i].size; i++)
871 memblock_add(rgn[i].base, rgn[i].size);
872 }
873
874 #ifdef CONFIG_SERIAL_EARLYCON
875
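/*
 * Parse "stdout-path" (or the legacy "linux,stdout-path") from /chosen and
 * register a matching earlycon, passing along any options that follow the
 * ':' separator in the path.
 */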
int __init early_init_dt_scan_chosen_stdout(void)
877 {
878 int offset;
879 const char *p, *q, *options = NULL;
880 int l;
881 const struct earlycon_id *match;
882 const void *fdt = initial_boot_params;
883 int ret;
884
885 offset = fdt_path_offset(fdt, "/chosen");
886 if (offset < 0)
887 offset = fdt_path_offset(fdt, "/chosen@0");
888 if (offset < 0)
889 return -ENOENT;
890
891 p = fdt_getprop(fdt, offset, "stdout-path", &l);
892 if (!p)
893 p = fdt_getprop(fdt, offset, "linux,stdout-path", &l);
894 if (!p || !l)
895 return -ENOENT;
896
897 q = strchrnul(p, ':');
898 if (*q != '\0')
899 options = q + 1;
900 l = q - p;
901
902 /* Get the node specified by stdout-path */
903 offset = fdt_path_offset_namelen(fdt, p, l);
904 if (offset < 0) {
905 pr_warn("earlycon: stdout-path %.*s not found\n", l, p);
906 return 0;
907 }
908
909 for (match = __earlycon_table; match < __earlycon_table_end; match++) {
910 if (!match->compatible[0])
911 continue;
912
913 if (fdt_node_check_compatible(fdt, offset, match->compatible))
914 continue;
915
916 ret = of_setup_earlycon(match, offset, options);
917 if (!ret || ret == -EALREADY)
918 return 0;
919 }
920 return -ENODEV;
921 }
922 #endif
923
924 /*
925 * early_init_dt_scan_root - fetch the top level address and size cells
926 */
int __init early_init_dt_scan_root(void)
928 {
929 const __be32 *prop;
930 const void *fdt = initial_boot_params;
931 int node = fdt_path_offset(fdt, "/");
932
933 if (node < 0)
934 return -ENODEV;
935
936 dt_root_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
937 dt_root_addr_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
938
939 prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
940 if (!WARN(!prop, "No '#size-cells' in root node\n"))
941 dt_root_size_cells = be32_to_cpup(prop);
942 pr_debug("dt_root_size_cells = %x\n", dt_root_size_cells);
943
944 prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
945 if (!WARN(!prop, "No '#address-cells' in root node\n"))
946 dt_root_addr_cells = be32_to_cpup(prop);
947 pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells);
948
949 return 0;
950 }
951
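/* Read an @s-cell big-endian number at *cellp and advance past it. */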
u64 __init dt_mem_next_cell(int s, const __be32 **cellp)
953 {
954 const __be32 *p = *cellp;
955
956 *cellp = p + s;
957 return of_read_number(p, s);
958 }
959
960 /*
961 * early_init_dt_scan_memory - Look for and parse memory nodes
962 */
int __init early_init_dt_scan_memory(void)
964 {
965 int node, found_memory = 0;
966 const void *fdt = initial_boot_params;
967
968 fdt_for_each_subnode(node, fdt, 0) {
969 const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
970 const __be32 *reg, *endp;
971 int l;
972 bool hotpluggable;
973
974 /* We are scanning "memory" nodes only */
975 if (type == NULL || strcmp(type, "memory") != 0)
976 continue;
977
978 if (!of_fdt_device_is_available(fdt, node))
979 continue;
980
981 reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
982 if (reg == NULL)
983 reg = of_get_flat_dt_prop(node, "reg", &l);
984 if (reg == NULL)
985 continue;
986
987 endp = reg + (l / sizeof(__be32));
988 hotpluggable = of_get_flat_dt_prop(node, "hotpluggable", NULL);
989
990 pr_debug("memory scan node %s, reg size %d,\n",
991 fdt_get_name(fdt, node, NULL), l);
992
993 while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
994 u64 base, size;
995
base = dt_mem_next_cell(dt_root_addr_cells, &reg);
size = dt_mem_next_cell(dt_root_size_cells, &reg);
998
999 if (size == 0)
1000 continue;
1001 pr_debug(" - %llx, %llx\n", base, size);
1002
1003 early_init_dt_add_memory_arch(base, size);
1004
1005 found_memory = 1;
1006
1007 if (!hotpluggable)
1008 continue;
1009
1010 if (memblock_mark_hotplug(base, size))
1011 pr_warn("failed to mark hotplug range 0x%llx - 0x%llx\n",
1012 base, base + size);
1013 }
1014 }
1015 return found_memory;
1016 }
1017
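/*
 * early_init_dt_scan_chosen - scan the /chosen node from the flat tree
 *
 * Records the /chosen offset, picks up initrd and elfcorehdr information,
 * feeds any "rng-seed" property to the RNG, and copies "bootargs" (combined
 * with CONFIG_CMDLINE as configured) into @cmdline.
 */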
int __init early_init_dt_scan_chosen(char *cmdline)
1019 {
1020 int l, node;
1021 const char *p;
1022 const void *rng_seed;
1023 const void *fdt = initial_boot_params;
1024
1025 node = fdt_path_offset(fdt, "/chosen");
1026 if (node < 0)
1027 node = fdt_path_offset(fdt, "/chosen@0");
1028 if (node < 0)
1029 /* Handle the cmdline config options even if no /chosen node */
1030 goto handle_cmdline;
1031
1032 chosen_node_offset = node;
1033
1034 early_init_dt_check_for_initrd(node);
1035 early_init_dt_check_for_elfcorehdr(node);
1036
1037 rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l);
1038 if (rng_seed && l > 0) {
1039 add_bootloader_randomness(rng_seed, l);
1040
1041 /* try to clear seed so it won't be found. */
1042 fdt_nop_property(initial_boot_params, node, "rng-seed");
1043
1044 /* update CRC check value */
1045 of_fdt_crc32 = crc32_be(~0, initial_boot_params,
1046 fdt_totalsize(initial_boot_params));
1047 }
1048
1049 /* Retrieve command line */
1050 p = of_get_flat_dt_prop(node, "bootargs", &l);
1051 if (p != NULL && l > 0)
1052 strscpy(cmdline, p, min(l, COMMAND_LINE_SIZE));
1053
1054 handle_cmdline:
1055 /*
1056 * CONFIG_CMDLINE is meant to be a default in case nothing else
1057 * managed to set the command line, unless CONFIG_CMDLINE_FORCE
1058 * is set in which case we override whatever was found earlier.
1059 */
1060 #ifdef CONFIG_CMDLINE
1061 #if defined(CONFIG_CMDLINE_EXTEND)
1062 strlcat(cmdline, " ", COMMAND_LINE_SIZE);
1063 strlcat(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1064 #elif defined(CONFIG_CMDLINE_FORCE)
1065 strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1066 #else
/* No arguments from the boot loader, use the kernel's cmdline */
1068 if (!((char *)cmdline)[0])
1069 strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1070 #endif
1071 #endif /* CONFIG_CMDLINE */
1072
1073 pr_debug("Command line is: %s\n", (char *)cmdline);
1074
1075 return 0;
1076 }
1077
1078 #ifndef MIN_MEMBLOCK_ADDR
1079 #define MIN_MEMBLOCK_ADDR __pa(PAGE_OFFSET)
1080 #endif
1081 #ifndef MAX_MEMBLOCK_ADDR
1082 #define MAX_MEMBLOCK_ADDR ((phys_addr_t)~0)
1083 #endif
1084
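/*
 * Default (weak) implementation: trim the region to whole pages, clamp it to
 * the [MIN_MEMBLOCK_ADDR, MAX_MEMBLOCK_ADDR] range and add it to memblock.
 * Architectures may provide their own version.
 */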
void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
1086 {
1087 const u64 phys_offset = MIN_MEMBLOCK_ADDR;
1088
1089 if (size < PAGE_SIZE - (base & ~PAGE_MASK)) {
1090 pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
1091 base, base + size);
1092 return;
1093 }
1094
1095 if (!PAGE_ALIGNED(base)) {
1096 size -= PAGE_SIZE - (base & ~PAGE_MASK);
1097 base = PAGE_ALIGN(base);
1098 }
1099 size &= PAGE_MASK;
1100
1101 if (base > MAX_MEMBLOCK_ADDR) {
1102 pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
1103 base, base + size);
1104 return;
1105 }
1106
1107 if (base + size - 1 > MAX_MEMBLOCK_ADDR) {
1108 pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
1109 ((u64)MAX_MEMBLOCK_ADDR) + 1, base + size);
1110 size = MAX_MEMBLOCK_ADDR - base + 1;
1111 }
1112
1113 if (base + size < phys_offset) {
1114 pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
1115 base, base + size);
1116 return;
1117 }
1118 if (base < phys_offset) {
1119 pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
1120 base, phys_offset);
1121 size -= phys_offset - base;
1122 base = phys_offset;
1123 }
1124 memblock_add(base, size);
1125 }
1126
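/* memblock-backed allocator used when unflattening the boot FDT. */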
static void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
1128 {
1129 void *ptr = memblock_alloc(size, align);
1130
1131 if (!ptr)
1132 panic("%s: Failed to allocate %llu bytes align=0x%llx\n",
1133 __func__, size, align);
1134
1135 return ptr;
1136 }
1137
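/*
 * Validate the FDT header at @dt_virt, record the blob's virtual and physical
 * addresses, compute the CRC later checked by of_fdt_raw_init(), and read the
 * root #address-cells/#size-cells values.
 */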
bool __init early_init_dt_verify(void *dt_virt, phys_addr_t dt_phys)
1139 {
1140 if (!dt_virt)
1141 return false;
1142
1143 /* check device tree validity */
1144 if (fdt_check_header(dt_virt))
1145 return false;
1146
1147 /* Setup flat device-tree pointer */
1148 initial_boot_params = dt_virt;
1149 initial_boot_params_pa = dt_phys;
1150 of_fdt_crc32 = crc32_be(~0, initial_boot_params,
1151 fdt_totalsize(initial_boot_params));
1152
1153 /* Initialize {size,address}-cells info */
1154 early_init_dt_scan_root();
1155
1156 return true;
1157 }
1158
1159
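/*
 * Scan the standard early nodes: /chosen (command line, initrd, ...), the
 * memory nodes, and the linux,usable-memory-range property.
 */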
void __init early_init_dt_scan_nodes(void)
1161 {
1162 int rc;
1163
1164 /* Retrieve various information from the /chosen node */
1165 rc = early_init_dt_scan_chosen(boot_command_line);
1166 if (rc)
1167 pr_warn("No chosen node found, continuing without\n");
1168
1169 /* Setup memory, calling early_init_dt_add_memory_arch */
1170 early_init_dt_scan_memory();
1171
1172 /* Handle linux,usable-memory-range property */
1173 early_init_dt_check_for_usable_mem_range();
1174 }
1175
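/*
 * Verify and register the boot FDT, then scan the standard early nodes.
 * Returns false if the blob is missing or invalid.
 */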
bool __init early_init_dt_scan(void *dt_virt, phys_addr_t dt_phys)
1177 {
1178 bool status;
1179
1180 status = early_init_dt_verify(dt_virt, dt_phys);
1181 if (!status)
1182 return false;
1183
1184 early_init_dt_scan_nodes();
1185 return true;
1186 }
1187
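/*
 * Copy @fdt into memory obtained from the early allocator; used when the
 * original blob is not in reserved memory (see unflatten_and_copy_device_tree()).
 */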
static void *__init copy_device_tree(void *fdt)
1189 {
1190 int size;
1191 void *dt;
1192
1193 size = fdt_totalsize(fdt);
1194 dt = early_init_dt_alloc_memory_arch(size,
1195 roundup_pow_of_two(FDT_V17_SIZE));
1196
1197 if (dt)
1198 memcpy(dt, fdt, size);
1199
1200 return dt;
1201 }
1202
1203 /**
1204 * unflatten_device_tree - create tree of device_nodes from flat blob
1205 *
1206 * unflattens the device-tree passed by the firmware, creating the
1207 * tree of struct device_node. It also fills the "name" and "type"
1208 * pointers of the nodes so the normal device-tree walking functions
1209 * can be used.
1210 */
void __init unflatten_device_tree(void)
1212 {
1213 void *fdt = initial_boot_params;
1214
1215 /* Save the statically-placed regions in the reserved_mem array */
1216 fdt_scan_reserved_mem_reg_nodes();
1217
1218 /* Don't use the bootloader provided DTB if ACPI is enabled */
1219 if (!acpi_disabled)
1220 fdt = NULL;
1221
/*
 * Populate an empty root node when ACPI is enabled or the bootloader
 * doesn't provide one.
 */
1226 if (!fdt) {
1227 fdt = (void *) __dtb_empty_root_begin;
1228 /* fdt_totalsize() will be used for copy size */
1229 if (fdt_totalsize(fdt) >
1230 __dtb_empty_root_end - __dtb_empty_root_begin) {
1231 pr_err("invalid size in dtb_empty_root\n");
1232 return;
1233 }
1234 of_fdt_crc32 = crc32_be(~0, fdt, fdt_totalsize(fdt));
1235 fdt = copy_device_tree(fdt);
1236 }
1237
1238 __unflatten_device_tree(fdt, NULL, &of_root,
1239 early_init_dt_alloc_memory_arch, false);
1240
1241 /* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */
1242 of_alias_scan(early_init_dt_alloc_memory_arch);
1243
1244 unittest_unflatten_overlay_base();
1245 }
1246
/**
 * unflatten_and_copy_device_tree - copy and create tree of device_nodes from flat blob
 *
 * Copies and unflattens the device-tree passed by the firmware, creating the
 * tree of struct device_node. It also fills the "name" and "type"
 * pointers of the nodes so the normal device-tree walking functions
 * can be used. This should only be used when the FDT memory has not been
 * reserved, such as when the FDT is built into the kernel init section.
 * If the FDT memory is already reserved then unflatten_device_tree()
 * should be used instead.
 */
void __init unflatten_and_copy_device_tree(void)
1259 {
1260 if (initial_boot_params)
1261 initial_boot_params = copy_device_tree(initial_boot_params);
1262
1263 unflatten_device_tree();
1264 }
1265
1266 #ifdef CONFIG_SYSFS
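/* sysfs read handler exposing the raw FDT blob as /sys/firmware/fdt. */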
static ssize_t of_fdt_raw_read(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr,
			       char *buf, loff_t off, size_t count)
1270 {
1271 memcpy(buf, initial_boot_params + off, count);
1272 return count;
1273 }
1274
static int __init of_fdt_raw_init(void)
1276 {
1277 static struct bin_attribute of_fdt_raw_attr =
1278 __BIN_ATTR(fdt, S_IRUSR, of_fdt_raw_read, NULL, 0);
1279
1280 if (!initial_boot_params)
1281 return 0;
1282
1283 if (of_fdt_crc32 != crc32_be(~0, initial_boot_params,
1284 fdt_totalsize(initial_boot_params))) {
1285 pr_warn("not creating '/sys/firmware/fdt': CRC check failed\n");
1286 return 0;
1287 }
1288 of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
1289 return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
1290 }
1291 late_initcall(of_fdt_raw_init);
1292 #endif
1293
1294 #endif /* CONFIG_OF_EARLY_FLATTREE */
1295