// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for creating, accessing and interpreting the device tree.
 *
 * Paul Mackerras	August 1996.
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
 *   {engebret|bergner}@us.ibm.com
 *
 * Adapted for sparc and sparc64 by David S. Miller davem@davemloft.net
 *
 * Reconsolidated from arch/x/kernel/prom.c by Stephen Rothwell and
 * Grant Likely.
 */

#define pr_fmt(fmt)	"OF: " fmt

#include <linux/cleanup.h>
#include <linux/console.h>
#include <linux/ctype.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/proc_fs.h>

#include "of_private.h"

LIST_HEAD(aliases_lookup);

struct device_node *of_root;
EXPORT_SYMBOL(of_root);
struct device_node *of_chosen;
EXPORT_SYMBOL(of_chosen);
struct device_node *of_aliases;
struct device_node *of_stdout;
static const char *of_stdout_options;

struct kset *of_kset;

/*
 * Used to protect the of_aliases, to hold off addition of nodes to sysfs.
 * This mutex must be held whenever modifications are being made to the
 * device tree. The of_{attach,detach}_node() and
 * of_{add,remove,update}_property() helpers make sure this happens.
 */
DEFINE_MUTEX(of_mutex);

/* use when traversing tree through the child, sibling,
 * or parent members of struct device_node.
 */
DEFINE_RAW_SPINLOCK(devtree_lock);

bool of_node_name_eq(const struct device_node *np, const char *name)
{
	const char *node_name;
	size_t len;

	if (!np)
		return false;

	node_name = kbasename(np->full_name);
	len = strchrnul(node_name, '@') - node_name;

	return (strlen(name) == len) && (strncmp(node_name, name, len) == 0);
}
EXPORT_SYMBOL(of_node_name_eq);
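
/*
 * Usage sketch (illustrative; the node path below is hypothetical): the
 * comparison stops at the unit address, so for a node at "/soc/i2c@40003000"
 * the basename is "i2c@40003000" and:
 *
 *	of_node_name_eq(np, "i2c")      -> true  (name up to '@' matches)
 *	of_node_name_eq(np, "i2c@4000") -> false (whole name must match)
 */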

bool of_node_name_prefix(const struct device_node *np, const char *prefix)
{
	if (!np)
		return false;

	return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0;
}
EXPORT_SYMBOL(of_node_name_prefix);

static bool __of_node_is_type(const struct device_node *np, const char *type)
{
	const char *match = __of_get_property(np, "device_type", NULL);

	return np && match && type && !strcmp(match, type);
}

#define EXCLUDED_DEFAULT_CELLS_PLATFORMS ( \
	IS_ENABLED(CONFIG_SPARC) || \
	of_find_compatible_node(NULL, NULL, "coreboot") \
)

int of_bus_n_addr_cells(struct device_node *np)
{
	u32 cells;

	for (; np; np = np->parent) {
		if (!of_property_read_u32(np, "#address-cells", &cells))
			return cells;
		/*
		 * Default root value and walking parent nodes for "#address-cells"
		 * is deprecated. Any platforms which hit this warning should
		 * be added to the excluded list.
		 */
		WARN_ONCE(!EXCLUDED_DEFAULT_CELLS_PLATFORMS,
			  "Missing '#address-cells' in %pOF\n", np);
	}
	return OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
}

int of_n_addr_cells(struct device_node *np)
{
	if (np->parent)
		np = np->parent;

	return of_bus_n_addr_cells(np);
}
EXPORT_SYMBOL(of_n_addr_cells);

int of_bus_n_size_cells(struct device_node *np)
{
	u32 cells;

	for (; np; np = np->parent) {
		if (!of_property_read_u32(np, "#size-cells", &cells))
			return cells;
		/*
		 * Default root value and walking parent nodes for "#size-cells"
		 * is deprecated. Any platforms which hit this warning should
		 * be added to the excluded list.
		 */
		WARN_ONCE(!EXCLUDED_DEFAULT_CELLS_PLATFORMS,
			  "Missing '#size-cells' in %pOF\n", np);
	}
	return OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
}

int of_n_size_cells(struct device_node *np)
{
	if (np->parent)
		np = np->parent;

	return of_bus_n_size_cells(np);
}
EXPORT_SYMBOL(of_n_size_cells);
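
/*
 * Usage sketch (illustrative; "bus_np" and the "flash" child are hypothetical):
 * a bus driver sizing the "reg" entries of a child node combines the two
 * helpers above, since a child's "reg" is interpreted with its parent's cell
 * counts.
 *
 *	struct device_node *child = of_get_child_by_name(bus_np, "flash");
 *	int na = of_n_addr_cells(child);   // cells per address in "reg"
 *	int ns = of_n_size_cells(child);   // cells per size in "reg"
 *	// one "reg" entry then spans (na + ns) 32-bit cells
 */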

#ifdef CONFIG_NUMA
int __weak of_node_to_nid(struct device_node *np)
{
	return NUMA_NO_NODE;
}
#endif

#define OF_PHANDLE_CACHE_BITS	7
#define OF_PHANDLE_CACHE_SZ	BIT(OF_PHANDLE_CACHE_BITS)

static struct device_node *phandle_cache[OF_PHANDLE_CACHE_SZ];

static u32 of_phandle_cache_hash(phandle handle)
{
	return hash_32(handle, OF_PHANDLE_CACHE_BITS);
}

/*
 * Caller must hold devtree_lock.
 */
void __of_phandle_cache_inv_entry(phandle handle)
{
	u32 handle_hash;
	struct device_node *np;

	if (!handle)
		return;

	handle_hash = of_phandle_cache_hash(handle);

	np = phandle_cache[handle_hash];
	if (np && handle == np->phandle)
		phandle_cache[handle_hash] = NULL;
}

void __init of_core_init(void)
{
	struct device_node *np;

	of_platform_register_reconfig_notifier();

	/* Create the kset, and register existing nodes */
	mutex_lock(&of_mutex);
	of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
	if (!of_kset) {
		mutex_unlock(&of_mutex);
		pr_err("failed to register existing nodes\n");
		return;
	}
	for_each_of_allnodes(np) {
		__of_attach_node_sysfs(np);
		if (np->phandle && !phandle_cache[of_phandle_cache_hash(np->phandle)])
			phandle_cache[of_phandle_cache_hash(np->phandle)] = np;
	}
	mutex_unlock(&of_mutex);

	/* Symlink in /proc as required by userspace ABI */
	if (of_root)
		proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
}

static struct property *__of_find_property(const struct device_node *np,
					   const char *name, int *lenp)
{
	struct property *pp;

	if (!np)
		return NULL;

	for (pp = np->properties; pp; pp = pp->next) {
		if (of_prop_cmp(pp->name, name) == 0) {
			if (lenp)
				*lenp = pp->length;
			break;
		}
	}

	return pp;
}

struct property *of_find_property(const struct device_node *np,
				  const char *name,
				  int *lenp)
{
	struct property *pp;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	pp = __of_find_property(np, name, lenp);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	return pp;
}
EXPORT_SYMBOL(of_find_property);

struct device_node *__of_find_all_nodes(struct device_node *prev)
{
	struct device_node *np;
	if (!prev) {
		np = of_root;
	} else if (prev->child) {
		np = prev->child;
	} else {
		/* Walk back up looking for a sibling, or the end of the structure */
		np = prev;
		while (np->parent && !np->sibling)
			np = np->parent;
		np = np->sibling; /* Might be null at the end of the tree */
	}
	return np;
}

/**
 * of_find_all_nodes - Get next node in global list
 * @prev:	Previous node or NULL to start iteration
 *		of_node_put() will be called on it
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_all_nodes(struct device_node *prev)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	np = __of_find_all_nodes(prev);
	of_node_get(np);
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_all_nodes);

/*
 * Find a property with a given name for a given node
 * and return the value.
 */
const void *__of_get_property(const struct device_node *np,
			      const char *name, int *lenp)
{
	const struct property *pp = __of_find_property(np, name, lenp);

	return pp ? pp->value : NULL;
}

/*
 * Find a property with a given name for a given node
 * and return the value.
 */
const void *of_get_property(const struct device_node *np, const char *name,
			    int *lenp)
{
	const struct property *pp = of_find_property(np, name, lenp);

	return pp ? pp->value : NULL;
}
EXPORT_SYMBOL(of_get_property);
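
/*
 * Usage sketch (illustrative; the property name is just an example):
 *
 *	int len;
 *	const char *model = of_get_property(np, "model", &len);
 *
 *	if (model)
 *		pr_info("model: %s (%d bytes)\n", model, len);
 *
 * The returned pointer aliases the property data itself and must not be
 * modified or freed; for typed accesses prefer the of_property_read_*()
 * helpers.
 */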

/**
 * __of_device_is_compatible() - Check if the node matches given constraints
 * @device: pointer to node
 * @compat: required compatible string, NULL or "" for any match
 * @type: required device_type value, NULL or "" for any match
 * @name: required node name, NULL or "" for any match
 *
 * Checks if the given @compat, @type and @name strings match the
 * properties of the given @device. A constraint can be skipped by
 * passing NULL or an empty string as the constraint.
 *
 * Returns 0 for no match, and a positive integer on match. The return
 * value is a relative score with larger values indicating better
 * matches. The score is weighted for the most specific compatible value
 * to get the highest score. Matching type is next, followed by matching
 * name. Practically speaking, this results in the following priority
 * order for matches:
 *
 * 1. specific compatible && type && name
 * 2. specific compatible && type
 * 3. specific compatible && name
 * 4. specific compatible
 * 5. general compatible && type && name
 * 6. general compatible && type
 * 7. general compatible && name
 * 8. general compatible
 * 9. type && name
 * 10. type
 * 11. name
 */
static int __of_device_is_compatible(const struct device_node *device,
				     const char *compat, const char *type, const char *name)
{
	const struct property *prop;
	const char *cp;
	int index = 0, score = 0;

	/* Compatible match has highest priority */
	if (compat && compat[0]) {
		prop = __of_find_property(device, "compatible", NULL);
		for (cp = of_prop_next_string(prop, NULL); cp;
		     cp = of_prop_next_string(prop, cp), index++) {
			if (of_compat_cmp(cp, compat, strlen(compat)) == 0) {
				score = INT_MAX/2 - (index << 2);
				break;
			}
		}
		if (!score)
			return 0;
	}

	/* Matching type is better than matching name */
	if (type && type[0]) {
		if (!__of_node_is_type(device, type))
			return 0;
		score += 2;
	}

	/* Matching name is a bit better than not */
	if (name && name[0]) {
		if (!of_node_name_eq(device, name))
			return 0;
		score++;
	}

	return score;
}

/** Checks if the given "compat" string matches one of the strings in
 * the device's "compatible" property
 */
int of_device_is_compatible(const struct device_node *device,
			    const char *compat)
{
	unsigned long flags;
	int res;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	res = __of_device_is_compatible(device, compat, NULL, NULL);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return res;
}
EXPORT_SYMBOL(of_device_is_compatible);

/** Checks if the device is compatible with any of the entries in
 *  a NULL terminated array of strings. Returns the best match
 *  score or 0.
 */
int of_device_compatible_match(const struct device_node *device,
			       const char *const *compat)
{
	unsigned int tmp, score = 0;

	if (!compat)
		return 0;

	while (*compat) {
		tmp = of_device_is_compatible(device, *compat);
		if (tmp > score)
			score = tmp;
		compat++;
	}

	return score;
}
EXPORT_SYMBOL_GPL(of_device_compatible_match);

/**
 * of_machine_compatible_match - Test root of device tree against a compatible array
 * @compats: NULL terminated array of compatible strings to look for in root node's compatible property.
 *
 * Returns true if the root node has any of the given compatible values in its
 * compatible property.
 */
bool of_machine_compatible_match(const char *const *compats)
{
	struct device_node *root;
	int rc = 0;

	root = of_find_node_by_path("/");
	if (root) {
		rc = of_device_compatible_match(root, compats);
		of_node_put(root);
	}

	return rc != 0;
}
EXPORT_SYMBOL(of_machine_compatible_match);
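
/*
 * Usage sketch (illustrative; the compatible strings are hypothetical):
 * checking whether the kernel is running on one of a known set of boards.
 *
 *	static const char * const foo_boards[] = {
 *		"vendor,board-a",
 *		"vendor,board-b",
 *		NULL
 *	};
 *
 *	if (of_machine_compatible_match(foo_boards))
 *		pr_info("running on a supported board\n");
 */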

/**
 * of_machine_device_match - Test root of device tree against a of_device_id array
 * @matches: NULL terminated array of of_device_id match structures to search in
 *
 * Returns true if the root node has any of the given compatible values in its
 * compatible property.
 */
bool of_machine_device_match(const struct of_device_id *matches)
{
	struct device_node *root;
	const struct of_device_id *match = NULL;

	root = of_find_node_by_path("/");
	if (root) {
		match = of_match_node(matches, root);
		of_node_put(root);
	}

	return match != NULL;
}
EXPORT_SYMBOL(of_machine_device_match);

/**
 * of_machine_get_match_data - Tell if root of device tree has a matching of_match structure
 * @matches: NULL terminated array of of_device_id match structures to search in
 *
 * Returns data associated with matched entry or NULL
 */
const void *of_machine_get_match_data(const struct of_device_id *matches)
{
	const struct of_device_id *match;
	struct device_node *root;

	root = of_find_node_by_path("/");
	if (!root)
		return NULL;

	match = of_match_node(matches, root);
	of_node_put(root);

	if (!match)
		return NULL;

	return match->data;
}
EXPORT_SYMBOL(of_machine_get_match_data);

static bool __of_device_is_status(const struct device_node *device,
				  const char * const *strings)
{
	const char *status;
	int statlen;

	if (!device)
		return false;

	status = __of_get_property(device, "status", &statlen);
	if (status == NULL)
		return false;

	if (statlen > 0) {
		while (*strings) {
			unsigned int len = strlen(*strings);

			if ((*strings)[len - 1] == '-') {
				if (!strncmp(status, *strings, len))
					return true;
			} else {
				if (!strcmp(status, *strings))
					return true;
			}
			strings++;
		}
	}

	return false;
}

/**
 * __of_device_is_available - check if a device is available for use
 *
 * @device: Node to check for availability, with locks already held
 *
 * Return: True if the status property is absent or set to "okay" or "ok",
 * false otherwise
 */
static bool __of_device_is_available(const struct device_node *device)
{
	static const char * const ok[] = {"okay", "ok", NULL};

	if (!device)
		return false;

	return !__of_get_property(device, "status", NULL) ||
		__of_device_is_status(device, ok);
}

/**
 * __of_device_is_reserved - check if a device is reserved
 *
 * @device: Node to check for availability, with locks already held
 *
 * Return: True if the status property is set to "reserved", false otherwise
 */
static bool __of_device_is_reserved(const struct device_node *device)
{
	static const char * const reserved[] = {"reserved", NULL};

	return __of_device_is_status(device, reserved);
}

/**
 * of_device_is_available - check if a device is available for use
 *
 * @device: Node to check for availability
 *
 * Return: True if the status property is absent or set to "okay" or "ok",
 * false otherwise
 */
bool of_device_is_available(const struct device_node *device)
{
	unsigned long flags;
	bool res;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	res = __of_device_is_available(device);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return res;

}
EXPORT_SYMBOL(of_device_is_available);

/**
 * __of_device_is_fail - check if a device has status "fail" or "fail-..."
 *
 * @device: Node to check status for, with locks already held
 *
 * Return: True if the status property is set to "fail" or "fail-..." (for any
 * error code suffix), false otherwise
 */
static bool __of_device_is_fail(const struct device_node *device)
{
	static const char * const fail[] = {"fail", "fail-", NULL};

	return __of_device_is_status(device, fail);
}

/**
 * of_device_is_big_endian - check if a device has BE registers
 *
 * @device: Node to check for endianness
 *
 * Return: True if the device has a "big-endian" property, or if the kernel
 * was compiled for BE *and* the device has a "native-endian" property.
 * Returns false otherwise.
 *
 * Callers would nominally use ioread32be/iowrite32be if
 * of_device_is_big_endian() == true, or readl/writel otherwise.
 */
bool of_device_is_big_endian(const struct device_node *device)
{
	if (of_property_read_bool(device, "big-endian"))
		return true;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) &&
	    of_property_read_bool(device, "native-endian"))
		return true;
	return false;
}
EXPORT_SYMBOL(of_device_is_big_endian);
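
/*
 * Usage sketch (illustrative; "priv" and its fields are hypothetical): a
 * driver can pick its MMIO accessors once at probe time based on the result
 * above.
 *
 *	priv->big_endian = of_device_is_big_endian(dev->of_node);
 *	...
 *	if (priv->big_endian)
 *		val = ioread32be(priv->base + reg);
 *	else
 *		val = readl(priv->base + reg);
 */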

/**
 * of_get_parent - Get a node's parent if any
 * @node:	Node to get parent
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_get_parent(const struct device_node *node)
{
	struct device_node *np;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	np = of_node_get(node->parent);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_get_parent);

/**
 * of_get_next_parent - Iterate to a node's parent
 * @node:	Node to get parent of
 *
 * This is like of_get_parent() except that it drops the
 * refcount on the passed node, making it suitable for iterating
 * through a node's parents.
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_get_next_parent(struct device_node *node)
{
	struct device_node *parent;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	parent = of_node_get(node->parent);
	of_node_put(node);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return parent;
}
EXPORT_SYMBOL(of_get_next_parent);

static struct device_node *__of_get_next_child(const struct device_node *node,
					       struct device_node *prev)
{
	struct device_node *next;

	if (!node)
		return NULL;

	next = prev ? prev->sibling : node->child;
	of_node_get(next);
	of_node_put(prev);
	return next;
}
#define __for_each_child_of_node(parent, child) \
	for (child = __of_get_next_child(parent, NULL); child != NULL; \
	     child = __of_get_next_child(parent, child))

/**
 * of_get_next_child - Iterate over a node's children
 * @node:	parent node
 * @prev:	previous child of the parent node, or NULL to get first
 *
 * Return: A node pointer with refcount incremented, use of_node_put() on
 * it when done. Returns NULL when prev is the last child. Decrements the
 * refcount of prev.
 */
struct device_node *of_get_next_child(const struct device_node *node,
				      struct device_node *prev)
{
	struct device_node *next;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	next = __of_get_next_child(node, prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_child);
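
/*
 * Usage sketch (illustrative): open-coding the iteration is rarely needed;
 * the for_each_child_of_node() wrapper from <linux/of.h> handles the
 * get/put sequencing shown above.
 *
 *	struct device_node *child;
 *
 *	for_each_child_of_node(parent, child) {
 *		if (!of_device_is_available(child))
 *			continue;
 *		// each iteration holds a reference on "child"; breaking out
 *		// of the loop early keeps that reference, so of_node_put()
 *		// it yourself in that case
 *	}
 */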

/**
 * of_get_next_child_with_prefix - Find the next child node with prefix
 * @node:	parent node
 * @prev:	previous child of the parent node, or NULL to get first
 * @prefix:	prefix that the node name should have
 *
 * This function is like of_get_next_child(), except that it automatically
 * skips any nodes whose name doesn't have the given prefix.
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_get_next_child_with_prefix(const struct device_node *node,
						  struct device_node *prev,
						  const char *prefix)
{
	struct device_node *next;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	next = prev ? prev->sibling : node->child;
	for (; next; next = next->sibling) {
		if (!of_node_name_prefix(next, prefix))
			continue;
		if (of_node_get(next))
			break;
	}
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_child_with_prefix);

static struct device_node *of_get_next_status_child(const struct device_node *node,
						    struct device_node *prev,
						    bool (*checker)(const struct device_node *))
{
	struct device_node *next;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	next = prev ? prev->sibling : node->child;
	for (; next; next = next->sibling) {
		if (!checker(next))
			continue;
		if (of_node_get(next))
			break;
	}
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}

/**
 * of_get_next_available_child - Find the next available child node
 * @node:	parent node
 * @prev:	previous child of the parent node, or NULL to get first
 *
 * This function is like of_get_next_child(), except that it
 * automatically skips any disabled nodes (i.e. status = "disabled").
 */
struct device_node *of_get_next_available_child(const struct device_node *node,
						struct device_node *prev)
{
	return of_get_next_status_child(node, prev, __of_device_is_available);
}
EXPORT_SYMBOL(of_get_next_available_child);

/**
 * of_get_next_reserved_child - Find the next reserved child node
 * @node:	parent node
 * @prev:	previous child of the parent node, or NULL to get first
 *
 * This function is like of_get_next_child(), except that it only returns
 * reserved nodes (i.e. status = "reserved").
 */
struct device_node *of_get_next_reserved_child(const struct device_node *node,
					       struct device_node *prev)
{
	return of_get_next_status_child(node, prev, __of_device_is_reserved);
}
EXPORT_SYMBOL(of_get_next_reserved_child);

/**
 * of_get_next_cpu_node - Iterate on cpu nodes
 * @prev:	previous child of the /cpus node, or NULL to get first
 *
 * Unusable CPUs (those with the status property set to "fail" or "fail-...")
 * will be skipped.
 *
 * Return: A cpu node pointer with refcount incremented, use of_node_put()
 * on it when done. Returns NULL when prev is the last child. Decrements
 * the refcount of prev.
 */
struct device_node *of_get_next_cpu_node(struct device_node *prev)
{
	struct device_node *next = NULL;
	unsigned long flags;
	struct device_node *node;

	if (!prev)
		node = of_find_node_by_path("/cpus");

	raw_spin_lock_irqsave(&devtree_lock, flags);
	if (prev)
		next = prev->sibling;
	else if (node) {
		next = node->child;
		of_node_put(node);
	}
	for (; next; next = next->sibling) {
		if (__of_device_is_fail(next))
			continue;
		if (!(of_node_name_eq(next, "cpu") ||
		      __of_node_is_type(next, "cpu")))
			continue;
		if (of_node_get(next))
			break;
	}
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_cpu_node);

/**
 * of_get_compatible_child - Find compatible child node
 * @parent:	parent node
 * @compatible:	compatible string
 *
 * Lookup child node whose compatible property contains the given compatible
 * string.
 *
 * Return: a node pointer with refcount incremented, use of_node_put() on it
 * when done; or NULL if not found.
 */
struct device_node *of_get_compatible_child(const struct device_node *parent,
					    const char *compatible)
{
	struct device_node *child;

	for_each_child_of_node(parent, child) {
		if (of_device_is_compatible(child, compatible))
			break;
	}

	return child;
}
EXPORT_SYMBOL(of_get_compatible_child);

/**
 * of_get_child_by_name - Find the child node by name for a given parent
 * @node:	parent node
 * @name:	child name to look for.
 *
 * This function looks for a child node matching the given name.
 *
 * Return: A node pointer if found, with refcount incremented, use
 * of_node_put() on it when done.
 * Returns NULL if node is not found.
 */
struct device_node *of_get_child_by_name(const struct device_node *node,
					 const char *name)
{
	struct device_node *child;

	for_each_child_of_node(node, child)
		if (of_node_name_eq(child, name))
			break;
	return child;
}
EXPORT_SYMBOL(of_get_child_by_name);

/**
 * of_get_available_child_by_name - Find the available child node by name for a given parent
 * @node:	parent node
 * @name:	child name to look for.
 *
 * This function looks for a child node matching the given name and checks
 * that the device is available for use.
 *
 * Return: A node pointer if found, with refcount incremented, use
 * of_node_put() on it when done.
 * Returns NULL if node is not found.
 */
struct device_node *of_get_available_child_by_name(const struct device_node *node,
						   const char *name)
{
	struct device_node *child;

	child = of_get_child_by_name(node, name);
	if (child && !of_device_is_available(child)) {
		of_node_put(child);
		return NULL;
	}

	return child;
}
EXPORT_SYMBOL(of_get_available_child_by_name);

struct device_node *__of_find_node_by_path(const struct device_node *parent,
					   const char *path)
{
	struct device_node *child;
	int len;

	len = strcspn(path, "/:");
	if (!len)
		return NULL;

	__for_each_child_of_node(parent, child) {
		const char *name = kbasename(child->full_name);
		if (strncmp(path, name, len) == 0 && (strlen(name) == len))
			return child;
	}
	return NULL;
}

struct device_node *__of_find_node_by_full_path(struct device_node *node,
						const char *path)
{
	const char *separator = strchr(path, ':');

	while (node && *path == '/') {
		struct device_node *tmp = node;

		path++; /* Increment past '/' delimiter */
		node = __of_find_node_by_path(node, path);
		of_node_put(tmp);
		path = strchrnul(path, '/');
		if (separator && separator < path)
			break;
	}
	return node;
}

/**
 * of_find_node_opts_by_path - Find a node matching a full OF path
 * @path: Either the full path to match, or if the path does not
 *        start with '/', the name of a property of the /aliases
 *        node (an alias). In the case of an alias, the node
 *        matching the alias' value will be returned.
 * @opts: Address of a pointer into which to store the start of
 *        an options string appended to the end of the path with
 *        a ':' separator.
 *
 * Valid paths:
 *  * /foo/bar	Full path
 *  * foo	Valid alias
 *  * foo/bar	Valid alias + relative path
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_opts_by_path(const char *path, const char **opts)
{
	struct device_node *np = NULL;
	const struct property *pp;
	unsigned long flags;
	const char *separator = strchr(path, ':');

	if (opts)
		*opts = separator ? separator + 1 : NULL;

	if (strcmp(path, "/") == 0)
		return of_node_get(of_root);

	/* The path could begin with an alias */
	if (*path != '/') {
		int len;
		const char *p = strchrnul(path, '/');

		if (separator && separator < p)
			p = separator;
		len = p - path;

		/* of_aliases must not be NULL */
		if (!of_aliases)
			return NULL;

		for_each_property_of_node(of_aliases, pp) {
			if (strlen(pp->name) == len && !strncmp(pp->name, path, len)) {
				np = of_find_node_by_path(pp->value);
				break;
			}
		}
		if (!np)
			return NULL;
		path = p;
	}

	/* Step down the tree matching path components */
	raw_spin_lock_irqsave(&devtree_lock, flags);
	if (!np)
		np = of_node_get(of_root);
	np = __of_find_node_by_full_path(np, path);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_opts_by_path);
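
/*
 * Usage sketch (illustrative; the alias and options string are hypothetical,
 * mirroring the stdout-path convention):
 *
 *	const char *opts;
 *	struct device_node *np;
 *
 *	np = of_find_node_opts_by_path("serial0:115200n8", &opts);
 *	// "serial0" is resolved through /aliases; opts points at "115200n8"
 *	of_node_put(np);
 */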

/**
 * of_find_node_by_name - Find a node by its "name" property
 * @from:	The node to start searching from or NULL; the node
 *		you pass will not be searched, only the next one
 *		will. Typically, you pass what the previous call
 *		returned. of_node_put() will be called on @from.
 * @name:	The name string to match against
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_by_name(struct device_node *from,
					 const char *name)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (of_node_name_eq(np, name) && of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_name);

/**
 * of_find_node_by_type - Find a node by its "device_type" property
 * @from:	The node to start searching from, or NULL to start searching
 *		the entire device tree. The node you pass will not be
 *		searched, only the next one will; typically, you pass
 *		what the previous call returned. of_node_put() will be
 *		called on from for you.
 * @type:	The type string to match against
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_by_type(struct device_node *from,
					 const char *type)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (__of_node_is_type(np, type) && of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_type);

/**
 * of_find_compatible_node - Find a node based on type and one of the
 *                           tokens in its "compatible" property
 * @from:	The node to start searching from or NULL, the node
 *		you pass will not be searched, only the next one
 *		will; typically, you pass what the previous call
 *		returned. of_node_put() will be called on it
 * @type:	The type string to match "device_type" or NULL to ignore
 * @compatible:	The string to match to one of the tokens in the device
 *		"compatible" list.
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_compatible_node(struct device_node *from,
					    const char *type, const char *compatible)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (__of_device_is_compatible(np, compatible, type, NULL) &&
		    of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_compatible_node);
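
/*
 * Usage sketch (illustrative; the compatible string is hypothetical): the
 * @from argument makes it natural to walk every matching node, and the
 * for_each_compatible_node() helper in <linux/of.h> wraps exactly this loop.
 *
 *	struct device_node *np = NULL;
 *
 *	while ((np = of_find_compatible_node(np, NULL, "vendor,widget")))
 *		pr_info("found %pOF\n", np);
 *	// the previous node is put on each call, so no manual of_node_put()
 *	// is needed unless the loop is exited early with np still held
 */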

/**
 * of_find_node_with_property - Find a node which has a property with
 *                              the given name.
 * @from:	The node to start searching from or NULL, the node
 *		you pass will not be searched, only the next one
 *		will; typically, you pass what the previous call
 *		returned. of_node_put() will be called on it
 * @prop_name:	The name of the property to look for.
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_with_property(struct device_node *from,
					       const char *prop_name)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np) {
		if (__of_find_property(np, prop_name, NULL)) {
			of_node_get(np);
			break;
		}
	}
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_with_property);

static
const struct of_device_id *__of_match_node(const struct of_device_id *matches,
					   const struct device_node *node)
{
	const struct of_device_id *best_match = NULL;
	int score, best_score = 0;

	if (!matches)
		return NULL;

	for (; matches->name[0] || matches->type[0] || matches->compatible[0]; matches++) {
		score = __of_device_is_compatible(node, matches->compatible,
						  matches->type, matches->name);
		if (score > best_score) {
			best_match = matches;
			best_score = score;
		}
	}

	return best_match;
}

/**
 * of_match_node - Tell if a device_node has a matching of_match structure
 * @matches:	array of of device match structures to search in
 * @node:	the of device structure to match against
 *
 * Low level utility function used by device matching.
 */
const struct of_device_id *of_match_node(const struct of_device_id *matches,
					 const struct device_node *node)
{
	const struct of_device_id *match;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	match = __of_match_node(matches, node);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return match;
}
EXPORT_SYMBOL(of_match_node);
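
/*
 * Usage sketch (illustrative; the compatible strings and data values are
 * hypothetical): drivers usually declare a sentinel-terminated table and let
 * the best-scoring entry provide per-variant data.
 *
 *	static const struct of_device_id widget_of_match[] = {
 *		{ .compatible = "vendor,widget-v2", .data = (void *)2 },
 *		{ .compatible = "vendor,widget",    .data = (void *)1 },
 *		{ }
 *	};
 *
 *	const struct of_device_id *id = of_match_node(widget_of_match, np);
 *
 *	if (id)
 *		variant = (uintptr_t)id->data;
 */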

/**
 * of_find_matching_node_and_match - Find a node based on an of_device_id
 *				     match table.
 * @from:	The node to start searching from or NULL, the node
 *		you pass will not be searched, only the next one
 *		will; typically, you pass what the previous call
 *		returned. of_node_put() will be called on it
 * @matches:	array of of device match structures to search in
 * @match:	Updated to point at the matches entry which matched
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_matching_node_and_match(struct device_node *from,
					const struct of_device_id *matches,
					const struct of_device_id **match)
{
	struct device_node *np;
	const struct of_device_id *m;
	unsigned long flags;

	if (match)
		*match = NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np) {
		m = __of_match_node(matches, np);
		if (m && of_node_get(np)) {
			if (match)
				*match = m;
			break;
		}
	}
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_matching_node_and_match);

/**
 * of_alias_from_compatible - Lookup appropriate alias for a device node
 *			      depending on compatible
 * @node:	pointer to a device tree node
 * @alias:	Pointer to buffer that alias value will be copied into
 * @len:	Length of alias value
 *
 * Based on the value of the compatible property, this routine will attempt
 * to choose an appropriate alias value for a particular device tree node.
 * It does this by stripping the manufacturer prefix (as delimited by a ',')
 * from the first entry in the compatible list property.
 *
 * Note: The matching on just the "product" side of the compatible is a relic
 * from I2C and SPI. Please do not add any new user.
 *
 * Return: This routine returns 0 on success, <0 on failure.
 */
int of_alias_from_compatible(const struct device_node *node, char *alias, int len)
{
	const char *compatible, *p;
	int cplen;

	compatible = of_get_property(node, "compatible", &cplen);
	if (!compatible || strlen(compatible) > cplen)
		return -ENODEV;
	p = strchr(compatible, ',');
	strscpy(alias, p ? p + 1 : compatible, len);
	return 0;
}
EXPORT_SYMBOL_GPL(of_alias_from_compatible);
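
/*
 * Usage sketch (illustrative; the compatible value is hypothetical): for a
 * node whose first compatible entry is "acme,sensor-x", everything up to and
 * including the ',' is dropped.
 *
 *	char alias[32];
 *
 *	if (!of_alias_from_compatible(np, alias, sizeof(alias)))
 *		pr_debug("alias: %s\n", alias);   // -> "sensor-x"
 */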

/**
 * of_find_node_by_phandle - Find a node given a phandle
 * @handle:	phandle of the node to find
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_by_phandle(phandle handle)
{
	struct device_node *np = NULL;
	unsigned long flags;
	u32 handle_hash;

	if (!handle)
		return NULL;

	handle_hash = of_phandle_cache_hash(handle);

	raw_spin_lock_irqsave(&devtree_lock, flags);

	if (phandle_cache[handle_hash] &&
	    handle == phandle_cache[handle_hash]->phandle)
		np = phandle_cache[handle_hash];

	if (!np) {
		for_each_of_allnodes(np)
			if (np->phandle == handle &&
			    !of_node_check_flag(np, OF_DETACHED)) {
				phandle_cache[handle_hash] = np;
				break;
			}
	}

	of_node_get(np);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);

void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
{
	int i;
	printk("%s %pOF", msg, args->np);
	for (i = 0; i < args->args_count; i++) {
		const char delim = i ? ',' : ':';

		pr_cont("%c%08x", delim, args->args[i]);
	}
	pr_cont("\n");
}

int of_phandle_iterator_init(struct of_phandle_iterator *it,
		const struct device_node *np,
		const char *list_name,
		const char *cells_name,
		int cell_count)
{
	const __be32 *list;
	int size;

	memset(it, 0, sizeof(*it));

	/*
	 * one of cell_count or cells_name must be provided to determine the
	 * argument length.
	 */
	if (cell_count < 0 && !cells_name)
		return -EINVAL;

	list = of_get_property(np, list_name, &size);
	if (!list)
		return -ENOENT;

	it->cells_name = cells_name;
	it->cell_count = cell_count;
	it->parent = np;
	it->list_end = list + size / sizeof(*list);
	it->phandle_end = list;
	it->cur = list;

	return 0;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_init);

int of_phandle_iterator_next(struct of_phandle_iterator *it)
{
	uint32_t count = 0;

	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	if (!it->cur || it->phandle_end >= it->list_end)
		return -ENOENT;

	it->cur = it->phandle_end;

	/* If phandle is 0, then it is an empty entry with no arguments. */
	it->phandle = be32_to_cpup(it->cur++);

	if (it->phandle) {

		/*
		 * Find the provider node and parse the #*-cells property to
		 * determine the argument length.
		 */
		it->node = of_find_node_by_phandle(it->phandle);

		if (it->cells_name) {
			if (!it->node) {
				pr_err("%pOF: could not find phandle %d\n",
				       it->parent, it->phandle);
				goto err;
			}

			if (of_property_read_u32(it->node, it->cells_name,
						 &count)) {
				/*
				 * If both cell_count and cells_name is given,
				 * fall back to cell_count in absence
				 * of the cells_name property
				 */
				if (it->cell_count >= 0) {
					count = it->cell_count;
				} else {
					pr_err("%pOF: could not get %s for %pOF\n",
					       it->parent,
					       it->cells_name,
					       it->node);
					goto err;
				}
			}
		} else {
			count = it->cell_count;
		}

		/*
		 * Make sure that the arguments actually fit in the remaining
		 * property data length
		 */
		if (it->cur + count > it->list_end) {
			if (it->cells_name)
				pr_err("%pOF: %s = %d found %td\n",
				       it->parent, it->cells_name,
				       count, it->list_end - it->cur);
			else
				pr_err("%pOF: phandle %s needs %d, found %td\n",
				       it->parent, of_node_full_name(it->node),
				       count, it->list_end - it->cur);
			goto err;
		}
	}

	it->phandle_end = it->cur + count;
	it->cur_count = count;

	return 0;

err:
	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_next);

int of_phandle_iterator_args(struct of_phandle_iterator *it,
			     uint32_t *args,
			     int size)
{
	int i, count;

	count = it->cur_count;

	if (WARN_ON(size < count))
		count = size;

	for (i = 0; i < count; i++)
		args[i] = be32_to_cpup(it->cur++);

	return count;
}

int __of_parse_phandle_with_args(const struct device_node *np,
				 const char *list_name,
				 const char *cells_name,
				 int cell_count, int index,
				 struct of_phandle_args *out_args)
{
	struct of_phandle_iterator it;
	int rc, cur_index = 0;

	if (index < 0)
		return -EINVAL;

	/* Loop over the phandles until the requested entry is found */
	of_for_each_phandle(&it, rc, np, list_name, cells_name, cell_count) {
		/*
		 * All of the error cases bail out of the loop, so at
		 * this point, the parsing is successful. If the requested
		 * index matches, then fill the out_args structure and return,
		 * or return -ENOENT for an empty entry.
		 */
		rc = -ENOENT;
		if (cur_index == index) {
			if (!it.phandle)
				goto err;

			if (out_args) {
				int c;

				c = of_phandle_iterator_args(&it,
							     out_args->args,
							     MAX_PHANDLE_ARGS);
				out_args->np = it.node;
				out_args->args_count = c;
			} else {
				of_node_put(it.node);
			}

			/* Found it! return success */
			return 0;
		}

		cur_index++;
	}

	/*
	 * Unlock node before returning result; will be one of:
	 * -ENOENT : index is for empty phandle
	 * -EINVAL : parsing error on data
	 */

 err:
	of_node_put(it.node);
	return rc;
}
EXPORT_SYMBOL(__of_parse_phandle_with_args);
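
/*
 * Usage sketch (illustrative; the consumer property and provider node are
 * hypothetical): the of_parse_phandle_with_args() wrapper in <linux/of.h>
 * builds on the helper above. For a consumer node containing
 *
 *	resets = <&rstc 7>, <&rstc 12>;
 *
 * the second specifier can be fetched with:
 *
 *	struct of_phandle_args args;
 *
 *	if (!of_parse_phandle_with_args(np, "resets", "#reset-cells", 1, &args)) {
 *		// args.np is the &rstc provider node, args.args[0] == 12
 *		of_node_put(args.np);
 *	}
 */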

/**
 * of_parse_phandle_with_args_map() - Find a node pointed by phandle in a list and remap it
 * @np:		pointer to a device tree node containing a list
 * @list_name:	property name that contains a list
 * @stem_name:	stem of property names that specify phandles' arguments count
 * @index:	index of a phandle to parse out
 * @out_args:	optional pointer to output arguments structure (will be filled)
 *
 * This function is useful to parse lists of phandles and their arguments.
 * Returns 0 on success and fills out_args, on error returns appropriate errno
 * value. The difference between this function and of_parse_phandle_with_args()
 * is that this API remaps a phandle if the node the phandle points to has
 * a <@stem_name>-map property.
 *
 * Caller is responsible to call of_node_put() on the returned out_args->np
 * pointer.
 *
 * Example::
 *
 *  phandle1: node1 {
 *      #list-cells = <2>;
 *  };
 *
 *  phandle2: node2 {
 *      #list-cells = <1>;
 *  };
 *
 *  phandle3: node3 {
 *      #list-cells = <1>;
 *      list-map = <0 &phandle2 3>,
 *                 <1 &phandle2 2>,
 *                 <2 &phandle1 5 1>;
 *      list-map-mask = <0x3>;
 *  };
 *
 *  node4 {
 *      list = <&phandle1 1 2 &phandle3 0>;
 *  };
 *
 * To get a device_node of the ``node2`` node you may call this:
 * of_parse_phandle_with_args_map(node4, "list", "list", 1, &args);
 */
int of_parse_phandle_with_args_map(const struct device_node *np,
				   const char *list_name,
				   const char *stem_name,
				   int index, struct of_phandle_args *out_args)
{
	char *cells_name __free(kfree) = kasprintf(GFP_KERNEL, "#%s-cells", stem_name);
	char *map_name __free(kfree) = kasprintf(GFP_KERNEL, "%s-map", stem_name);
	char *mask_name __free(kfree) = kasprintf(GFP_KERNEL, "%s-map-mask", stem_name);
	char *pass_name __free(kfree) = kasprintf(GFP_KERNEL, "%s-map-pass-thru", stem_name);
	struct device_node *cur, *new = NULL;
	const __be32 *map, *mask, *pass;
	static const __be32 dummy_mask[] = { [0 ... (MAX_PHANDLE_ARGS - 1)] = cpu_to_be32(~0) };
	static const __be32 dummy_pass[] = { [0 ... (MAX_PHANDLE_ARGS - 1)] = cpu_to_be32(0) };
	__be32 initial_match_array[MAX_PHANDLE_ARGS];
	const __be32 *match_array = initial_match_array;
	int i, ret, map_len, match;
	u32 list_size, new_size;

	if (index < 0)
		return -EINVAL;

	if (!cells_name || !map_name || !mask_name || !pass_name)
		return -ENOMEM;

	ret = __of_parse_phandle_with_args(np, list_name, cells_name, -1, index,
					   out_args);
	if (ret)
		return ret;

	/* Get the #<list>-cells property */
	cur = out_args->np;
	ret = of_property_read_u32(cur, cells_name, &list_size);
	if (ret < 0)
		goto put;

	/* Precalculate the match array - this simplifies match loop */
	for (i = 0; i < list_size; i++)
		initial_match_array[i] = cpu_to_be32(out_args->args[i]);

	ret = -EINVAL;
	while (cur) {
		/* Get the <list>-map property */
		map = of_get_property(cur, map_name, &map_len);
		if (!map) {
			return 0;
		}
		map_len /= sizeof(u32);

		/* Get the <list>-map-mask property (optional) */
		mask = of_get_property(cur, mask_name, NULL);
		if (!mask)
			mask = dummy_mask;
		/* Iterate through <list>-map property */
		match = 0;
		while (map_len > (list_size + 1) && !match) {
			/* Compare specifiers */
			match = 1;
			for (i = 0; i < list_size; i++, map_len--)
				match &= !((match_array[i] ^ *map++) & mask[i]);

			of_node_put(new);
			new = of_find_node_by_phandle(be32_to_cpup(map));
			map++;
			map_len--;

			/* Check if not found */
			if (!new) {
				ret = -EINVAL;
				goto put;
			}

			if (!of_device_is_available(new))
				match = 0;

			ret = of_property_read_u32(new, cells_name, &new_size);
			if (ret)
				goto put;

			/* Check for malformed properties */
			if (WARN_ON(new_size > MAX_PHANDLE_ARGS) ||
			    map_len < new_size) {
				ret = -EINVAL;
				goto put;
			}

			/* Move forward by new node's #<list>-cells amount */
			map += new_size;
			map_len -= new_size;
		}
		if (!match) {
			ret = -ENOENT;
			goto put;
		}

		/* Get the <list>-map-pass-thru property (optional) */
		pass = of_get_property(cur, pass_name, NULL);
		if (!pass)
			pass = dummy_pass;

		/*
		 * Successfully parsed a <list>-map translation; copy new
		 * specifier into the out_args structure, keeping the
		 * bits specified in <list>-map-pass-thru.
		 */
		for (i = 0; i < new_size; i++) {
			__be32 val = *(map - new_size + i);

			if (i < list_size) {
				val &= ~pass[i];
				val |= cpu_to_be32(out_args->args[i]) & pass[i];
			}

			initial_match_array[i] = val;
			out_args->args[i] = be32_to_cpu(val);
		}
		out_args->args_count = list_size = new_size;
		/* Iterate again with new provider */
		out_args->np = new;
		of_node_put(cur);
		cur = new;
		new = NULL;
	}
put:
	of_node_put(cur);
	of_node_put(new);
	return ret;
}
EXPORT_SYMBOL(of_parse_phandle_with_args_map);

/**
 * of_count_phandle_with_args() - Find the number of phandle references in a property
 * @np:		pointer to a device tree node containing a list
 * @list_name:	property name that contains a list
 * @cells_name:	property name that specifies phandles' arguments count
 *
 * Return: The number of phandle + argument tuples within a property. It
 * is a typical pattern to encode a list of phandle and variable
 * arguments into a single property. The number of arguments is encoded
 * by a property in the phandle-target node. For example, a gpios
 * property would contain a list of GPIO specifiers consisting of a
 * phandle and 1 or more arguments. The number of arguments are
 * determined by the #gpio-cells property in the node pointed to by the
 * phandle.
 */
int of_count_phandle_with_args(const struct device_node *np, const char *list_name,
			       const char *cells_name)
{
	struct of_phandle_iterator it;
	int rc, cur_index = 0;

	/*
	 * If cells_name is NULL we assume a cell count of 0. This makes
	 * counting the phandles trivial as each 32bit word in the list is a
	 * phandle and there are no arguments to consider. So we don't iterate
	 * through the list but just use the length to determine the phandle
	 * count.
	 */
	if (!cells_name) {
		const __be32 *list;
		int size;

		list = of_get_property(np, list_name, &size);
		if (!list)
			return -ENOENT;

		return size / sizeof(*list);
	}

	rc = of_phandle_iterator_init(&it, np, list_name, cells_name, -1);
	if (rc)
		return rc;

	while ((rc = of_phandle_iterator_next(&it)) == 0)
		cur_index += 1;

	if (rc != -ENOENT)
		return rc;

	return cur_index;
}
EXPORT_SYMBOL(of_count_phandle_with_args);
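
/*
 * Usage sketch (illustrative; "cs-gpios" follows the common GPIO binding):
 * counting specifiers before allocating per-entry state.
 *
 *	int n = of_count_phandle_with_args(np, "cs-gpios", "#gpio-cells");
 *
 *	if (n > 0)
 *		; // allocate n chip-select descriptors
 *	else if (n == -ENOENT)
 *		; // property absent, treat as zero chip selects
 */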
1695
__of_remove_property_from_list(struct property ** list,struct property * prop)1696 static struct property *__of_remove_property_from_list(struct property **list, struct property *prop)
1697 {
1698 struct property **next;
1699
1700 for (next = list; *next; next = &(*next)->next) {
1701 if (*next == prop) {
1702 *next = prop->next;
1703 prop->next = NULL;
1704 return prop;
1705 }
1706 }
1707 return NULL;
1708 }
1709
1710 /**
1711 * __of_add_property - Add a property to a node without lock operations
1712 * @np: Caller's Device Node
1713 * @prop: Property to add
1714 */
__of_add_property(struct device_node * np,struct property * prop)1715 int __of_add_property(struct device_node *np, struct property *prop)
1716 {
1717 int rc = 0;
1718 unsigned long flags;
1719 struct property **next;
1720
1721 raw_spin_lock_irqsave(&devtree_lock, flags);
1722
1723 __of_remove_property_from_list(&np->deadprops, prop);
1724
1725 prop->next = NULL;
1726 next = &np->properties;
1727 while (*next) {
1728 if (of_prop_cmp(prop->name, (*next)->name) == 0) {
1729 /* duplicate ! don't insert it */
1730 rc = -EEXIST;
1731 goto out_unlock;
1732 }
1733 next = &(*next)->next;
1734 }
1735 *next = prop;
1736
1737 out_unlock:
1738 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1739 if (rc)
1740 return rc;
1741
1742 __of_add_property_sysfs(np, prop);
1743 return 0;
1744 }
1745
1746 /**
1747 * of_add_property - Add a property to a node
1748 * @np: Caller's Device Node
1749 * @prop: Property to add
1750 */
of_add_property(struct device_node * np,struct property * prop)1751 int of_add_property(struct device_node *np, struct property *prop)
1752 {
1753 int rc;
1754
1755 mutex_lock(&of_mutex);
1756 rc = __of_add_property(np, prop);
1757 mutex_unlock(&of_mutex);
1758
1759 if (!rc)
1760 of_property_notify(OF_RECONFIG_ADD_PROPERTY, np, prop, NULL);
1761
1762 return rc;
1763 }
1764 EXPORT_SYMBOL_GPL(of_add_property);
1765
__of_remove_property(struct device_node * np,struct property * prop)1766 int __of_remove_property(struct device_node *np, struct property *prop)
1767 {
1768 unsigned long flags;
1769 int rc = -ENODEV;
1770
1771 raw_spin_lock_irqsave(&devtree_lock, flags);
1772
1773 if (__of_remove_property_from_list(&np->properties, prop)) {
1774 /* Found the property, add it to deadprops list */
1775 prop->next = np->deadprops;
1776 np->deadprops = prop;
1777 rc = 0;
1778 }
1779
1780 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1781 if (rc)
1782 return rc;
1783
1784 __of_remove_property_sysfs(np, prop);
1785 return 0;
1786 }
1787
/**
 * of_remove_property - Remove a property from a node.
 * @np: Caller's Device Node
 * @prop: Property to remove
 *
 * Note that we don't actually remove it, since we have given out
 * who-knows-how-many pointers to the data using get-property.
 * Instead we just move the property to the "dead properties"
 * list, so it won't be found any more.
 */
int of_remove_property(struct device_node *np, struct property *prop)
{
	int rc;

	if (!prop)
		return -ENODEV;

	mutex_lock(&of_mutex);
	rc = __of_remove_property(np, prop);
	mutex_unlock(&of_mutex);

	if (!rc)
		of_property_notify(OF_RECONFIG_REMOVE_PROPERTY, np, prop, NULL);

	return rc;
}
EXPORT_SYMBOL_GPL(of_remove_property);
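
/*
 * Example (illustrative only): hiding a property from further lookups on a
 * hypothetical node @np. "vendor,example-string" is assumed to exist; the
 * data itself is kept on the "dead properties" list as described above.
 *
 *	struct property *prop;
 *
 *	prop = of_find_property(np, "vendor,example-string", NULL);
 *	if (prop)
 *		of_remove_property(np, prop);
 */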

int __of_update_property(struct device_node *np, struct property *newprop,
			 struct property **oldpropp)
{
	struct property **next, *oldprop;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	__of_remove_property_from_list(&np->deadprops, newprop);

	for (next = &np->properties; *next; next = &(*next)->next) {
		if (of_prop_cmp((*next)->name, newprop->name) == 0)
			break;
	}
	*oldpropp = oldprop = *next;

	if (oldprop) {
		/* replace the existing property */
		newprop->next = oldprop->next;
		*next = newprop;
		oldprop->next = np->deadprops;
		np->deadprops = oldprop;
	} else {
		/* new property */
		newprop->next = NULL;
		*next = newprop;
	}

	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	__of_update_property_sysfs(np, newprop, oldprop);

	return 0;
}

/*
 * of_update_property - Update a property in a node; if the property does
 * not exist, add it.
 *
 * Note that we don't actually remove it, since we have given out
 * who-knows-how-many pointers to the data using get-property.
 * Instead we just move the property to the "dead properties" list,
 * and add the new property to the property list.
 */
int of_update_property(struct device_node *np, struct property *newprop)
{
	struct property *oldprop;
	int rc;

	if (!newprop->name)
		return -EINVAL;

	mutex_lock(&of_mutex);
	rc = __of_update_property(np, newprop, &oldprop);
	mutex_unlock(&of_mutex);

	if (!rc)
		of_property_notify(OF_RECONFIG_UPDATE_PROPERTY, np, newprop, oldprop);

	return rc;
}
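
/*
 * Example (illustrative only): overriding the "status" property of a
 * hypothetical node @np, creating it if it does not exist yet. As above,
 * the property needs permanent storage.
 *
 *	static struct property status_prop = {
 *		.name	= "status",
 *		.value	= "disabled",
 *		.length	= sizeof("disabled"),
 *	};
 *
 *	err = of_update_property(np, &status_prop);
 */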

static void of_alias_add(struct alias_prop *ap, struct device_node *np,
			 int id, const char *stem, int stem_len)
{
	ap->np = np;
	ap->id = id;
	strscpy(ap->stem, stem, stem_len + 1);
	list_add_tail(&ap->link, &aliases_lookup);
	pr_debug("adding DT alias:%s: stem=%s id=%i node=%pOF\n",
		 ap->alias, ap->stem, ap->id, np);
}

/**
 * of_alias_scan - Scan all properties of the 'aliases' node
 * @dt_alloc: An allocator that provides a virtual address to memory
 * for storing the alias table entries
 *
 * The function scans all the properties of the 'aliases' node and populates
 * the global lookup table with the properties.
 */
void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
{
	const struct property *pp;

	of_aliases = of_find_node_by_path("/aliases");
	of_chosen = of_find_node_by_path("/chosen");
	if (of_chosen == NULL)
		of_chosen = of_find_node_by_path("/chosen@0");

	if (of_chosen) {
		/* linux,stdout-path and /aliases/stdout are for legacy compatibility */
		const char *name = NULL;

		if (of_property_read_string(of_chosen, "stdout-path", &name))
			of_property_read_string(of_chosen, "linux,stdout-path",
						&name);
		if (IS_ENABLED(CONFIG_PPC) && !name)
			of_property_read_string(of_aliases, "stdout", &name);
		if (name)
			of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
		if (of_stdout)
			of_stdout->fwnode.flags |= FWNODE_FLAG_BEST_EFFORT;
	}

	if (!of_aliases)
		return;

	for_each_property_of_node(of_aliases, pp) {
		const char *start = pp->name;
		const char *end = start + strlen(start);
		struct device_node *np;
		struct alias_prop *ap;
		int id, len;

		/* Skip properties we do not want to process */
		if (is_pseudo_property(pp->name))
			continue;

		np = of_find_node_by_path(pp->value);
		if (!np)
			continue;

		/*
		 * Walk the alias backwards to extract the id and work out
		 * the 'stem' string.
		 */
		while (end > start && isdigit(*(end - 1)))
			end--;
		len = end - start;

		if (kstrtoint(end, 10, &id) < 0) {
			of_node_put(np);
			continue;
		}

		/* Allocate an alias_prop with enough space for the stem */
		ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap));
		if (!ap) {
			of_node_put(np);
			continue;
		}
		memset(ap, 0, sizeof(*ap) + len + 1);
		ap->alias = start;
		of_alias_add(ap, np, id, start, len);
	}
}
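
/*
 * Example (illustrative device tree fragment, not part of this file): the
 * aliases below would be split into stem "serial" with ids 0 and 1, and
 * stem "i2c" with id 2. The labels and paths are assumptions for
 * illustration only.
 *
 *	aliases {
 *		serial0 = &uart0;
 *		serial1 = "/soc/serial@40011000";
 *		i2c2 = &i2c_bus2;
 *	};
 */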

/**
 * of_alias_get_id - Get alias id for the given device_node
 * @np: Pointer to the given device_node
 * @stem: Alias stem of the given device_node
 *
 * The function traverses the lookup table to get the alias id for the given
 * device_node and alias stem.
 *
 * Return: The alias id if found, -ENODEV otherwise.
 */
int of_alias_get_id(const struct device_node *np, const char *stem)
{
	struct alias_prop *app;
	int id = -ENODEV;

	mutex_lock(&of_mutex);
	list_for_each_entry(app, &aliases_lookup, link) {
		if (strcmp(app->stem, stem) != 0)
			continue;

		if (np == app->np) {
			id = app->id;
			break;
		}
	}
	mutex_unlock(&of_mutex);

	return id;
}
EXPORT_SYMBOL_GPL(of_alias_get_id);
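
/*
 * Example (illustrative only): a serial driver probe using the alias id as
 * its port number. "pdev" is a hypothetical platform device; "serial" is the
 * conventional stem for serialN aliases.
 *
 *	int id = of_alias_get_id(pdev->dev.of_node, "serial");
 *
 *	if (id < 0)
 *		id = 0;
 */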

/**
 * of_alias_get_highest_id - Get highest alias id for the given stem
 * @stem: Alias stem to be examined
 *
 * The function traverses the lookup table to get the highest alias id for
 * the given alias stem. It returns the alias id if found, -ENODEV otherwise.
 */
int of_alias_get_highest_id(const char *stem)
{
	struct alias_prop *app;
	int id = -ENODEV;

	mutex_lock(&of_mutex);
	list_for_each_entry(app, &aliases_lookup, link) {
		if (strcmp(app->stem, stem) != 0)
			continue;

		if (app->id > id)
			id = app->id;
	}
	mutex_unlock(&of_mutex);

	return id;
}
EXPORT_SYMBOL_GPL(of_alias_get_highest_id);
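
/*
 * Example (illustrative only): reserving the statically aliased range before
 * handing out dynamic ids. The stem "serial" and "first_dynamic_id" are
 * assumptions for illustration.
 *
 *	int highest = of_alias_get_highest_id("serial");
 *
 *	if (highest >= 0)
 *		first_dynamic_id = highest + 1;
 */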

/**
 * of_console_check() - Test and setup console for DT setup
 * @dn: Pointer to device node
 * @name: Name to use for preferred console without index, e.g. "ttyS"
 * @index: Index to use for preferred console.
 *
 * Check if the given device node matches the stdout-path property in the
 * /chosen node. If it does then register it as the preferred console.
 *
 * Return: true if the console was successfully set up, false otherwise.
 */
bool of_console_check(const struct device_node *dn, char *name, int index)
{
	if (!dn || dn != of_stdout || console_set_on_cmdline)
		return false;

	/*
	 * XXX: cast `options' to char pointer to suppress compilation
	 * warnings: printk, UART and console drivers expect char pointer.
	 */
	return !add_preferred_console(name, index, (char *)of_stdout_options);
}
EXPORT_SYMBOL_GPL(of_console_check);
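
/*
 * Example (illustrative only): a UART driver registering its port as the
 * preferred console when it is the chosen stdout. "np", "dev" and
 * "port_line" are assumptions for illustration.
 *
 *	if (of_console_check(np, "ttyS", port_line))
 *		dev_info(dev, "registered as preferred console\n");
 */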

/**
 * of_find_next_cache_node - Find a node's subsidiary cache
 * @np: node of type "cpu" or "cache"
 *
 * Return: A node pointer with refcount incremented, or NULL if no
 * subsidiary cache is found. Use of_node_put() on it when done. Caller
 * should hold a reference to np.
 */
struct device_node *of_find_next_cache_node(const struct device_node *np)
{
	struct device_node *child, *cache_node;

	cache_node = of_parse_phandle(np, "l2-cache", 0);
	if (!cache_node)
		cache_node = of_parse_phandle(np, "next-level-cache", 0);

	if (cache_node)
		return cache_node;

	/*
	 * OF on pmac has nodes instead of properties named "l2-cache"
	 * beneath CPU nodes.
	 */
	if (IS_ENABLED(CONFIG_PPC_PMAC) && of_node_is_type(np, "cpu"))
		for_each_child_of_node(np, child)
			if (of_node_is_type(child, "cache"))
				return child;

	return NULL;
}
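
/*
 * Example (illustrative only, error handling omitted for brevity): looking
 * up the next-level cache of CPU0 and dropping the references when done.
 *
 *	struct device_node *cpu = of_get_cpu_node(0, NULL);
 *	struct device_node *l2 = of_find_next_cache_node(cpu);
 *
 *	of_node_put(l2);
 *	of_node_put(cpu);
 */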

/**
 * of_find_last_cache_level - Find the level at which the last cache is
 * present for the given logical cpu
 *
 * @cpu: cpu number (logical index) for which the last cache level is needed
 *
 * Return: The level at which the last cache is present. It is exactly the
 * same as the total number of cache levels for the given logical cpu.
 */
int of_find_last_cache_level(unsigned int cpu)
{
	u32 cache_level = 0;
	struct device_node *prev = NULL, *np = of_cpu_device_node_get(cpu);

	while (np) {
		of_node_put(prev);
		prev = np;
		np = of_find_next_cache_node(np);
	}

	of_property_read_u32(prev, "cache-level", &cache_level);
	of_node_put(prev);

	return cache_level;
}
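
/*
 * Example (illustrative only): querying the number of cache levels described
 * in the device tree for the boot CPU.
 *
 *	int levels = of_find_last_cache_level(0);
 */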

/**
 * of_map_id - Translate an ID through a downstream mapping.
 * @np: root complex device node.
 * @id: device ID to map.
 * @map_name: property name of the map to use.
 * @map_mask_name: optional property name of the mask to use.
 * @target: optional pointer to a target device node.
 * @id_out: optional pointer to receive the translated ID.
 *
 * Given a device ID, look up the appropriate implementation-defined
 * platform ID and/or the target device which receives transactions on that
 * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
 * @id_out may be NULL if only the other is required. If @target points to
 * a non-NULL device node pointer, only entries targeting that node will be
 * matched; if it points to a NULL value, it will receive the device node of
 * the first matching target phandle, with a reference held.
 *
 * Return: 0 on success or a standard error code on failure.
 */
int of_map_id(const struct device_node *np, u32 id,
	      const char *map_name, const char *map_mask_name,
	      struct device_node **target, u32 *id_out)
{
	u32 map_mask, masked_id;
	int map_len;
	const __be32 *map = NULL;

	if (!np || !map_name || (!target && !id_out))
		return -EINVAL;

	map = of_get_property(np, map_name, &map_len);
	if (!map) {
		if (target)
			return -ENODEV;
		/* Otherwise, no map implies no translation */
		*id_out = id;
		return 0;
	}

	if (!map_len || map_len % (4 * sizeof(*map))) {
		pr_err("%pOF: Error: Bad %s length: %d\n", np,
		       map_name, map_len);
		return -EINVAL;
	}

	/* The default is to select all bits. */
	map_mask = 0xffffffff;

	/*
	 * Can be overridden by "{iommu,msi}-map-mask" property.
	 * If of_property_read_u32() fails, the default is used.
	 */
	if (map_mask_name)
		of_property_read_u32(np, map_mask_name, &map_mask);

	masked_id = map_mask & id;
	for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
		struct device_node *phandle_node;
		u32 id_base = be32_to_cpup(map + 0);
		u32 phandle = be32_to_cpup(map + 1);
		u32 out_base = be32_to_cpup(map + 2);
		u32 id_len = be32_to_cpup(map + 3);

		if (id_base & ~map_mask) {
			pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores id-base (0x%x)\n",
			       np, map_name, map_name,
			       map_mask, id_base);
			return -EFAULT;
		}

		if (masked_id < id_base || masked_id >= id_base + id_len)
			continue;

		phandle_node = of_find_node_by_phandle(phandle);
		if (!phandle_node)
			return -ENODEV;

		if (target) {
			if (*target)
				of_node_put(phandle_node);
			else
				*target = phandle_node;

			if (*target != phandle_node)
				continue;
		}

		if (id_out)
			*id_out = masked_id - id_base + out_base;

		pr_debug("%pOF: %s, using mask %08x, id-base: %08x, out-base: %08x, length: %08x, id: %08x -> %08x\n",
			 np, map_name, map_mask, id_base, out_base,
			 id_len, id, masked_id - id_base + out_base);
		return 0;
	}

	pr_info("%pOF: no %s translation for id 0x%x on %pOF\n", np, map_name,
		id, target && *target ? *target : NULL);

	/* Bypasses translation */
	if (id_out)
		*id_out = id;
	return 0;
}
EXPORT_SYMBOL_GPL(of_map_id);
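
/*
 * Example (illustrative only): translating a PCI requester ID to an MSI
 * specifier through a host bridge's "msi-map". "bridge_np" and "rid" are
 * assumptions for illustration; on success "msi_np" holds a reference that
 * must be dropped with of_node_put().
 *
 *	struct device_node *msi_np = NULL;
 *	u32 msi_spec;
 *	int err;
 *
 *	err = of_map_id(bridge_np, rid, "msi-map", "msi-map-mask",
 *			&msi_np, &msi_spec);
 */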