// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for creating, accessing and interpreting the device tree.
 *
 * Paul Mackerras	August 1996.
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
 *  {engebret|bergner}@us.ibm.com
 *
 * Adapted for sparc and sparc64 by David S. Miller davem@davemloft.net
 *
 * Reconsolidated from arch/x/kernel/prom.c by Stephen Rothwell and
 * Grant Likely.
 */

#define pr_fmt(fmt)	"OF: " fmt

#include <linux/cleanup.h>
#include <linux/console.h>
#include <linux/ctype.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/proc_fs.h>

#include "of_private.h"

LIST_HEAD(aliases_lookup);

struct device_node *of_root;
EXPORT_SYMBOL(of_root);
struct device_node *of_chosen;
EXPORT_SYMBOL(of_chosen);
struct device_node *of_aliases;
struct device_node *of_stdout;
static const char *of_stdout_options;

struct kset *of_kset;

/*
 * Used to protect the of_aliases, to hold off addition of nodes to sysfs.
 * This mutex must be held whenever modifications are being made to the
 * device tree. The of_{attach,detach}_node() and
 * of_{add,remove,update}_property() helpers make sure this happens.
 */
DEFINE_MUTEX(of_mutex);

/* use when traversing tree through the child, sibling,
 * or parent members of struct device_node.
 */
DEFINE_RAW_SPINLOCK(devtree_lock);

/**
 * of_node_name_eq - Compare a node's name against a string
 * @np: node whose name to test (may be NULL)
 * @name: name to compare against
 *
 * Return: true if @np's name equals @name, ignoring any unit address
 * (the part of the node name from '@' onward). False if @np is NULL.
 */
bool of_node_name_eq(const struct device_node *np, const char *name)
{
	const char *node_name;
	size_t len;

	if (!np)
		return false;

	node_name = kbasename(np->full_name);
	/* compare only up to the '@' unit-address separator, if any */
	len = strchrnul(node_name, '@') - node_name;

	return (strlen(name) == len) && (strncmp(node_name, name, len) == 0);
}
EXPORT_SYMBOL(of_node_name_eq);

/**
 * of_node_name_prefix - Test whether a node's name starts with a prefix
 * @np: node whose name to test (may be NULL)
 * @prefix: prefix to look for
 *
 * Return: true if the node name (including any unit address) begins with
 * @prefix. False if @np is NULL.
 */
bool of_node_name_prefix(const struct device_node *np, const char *prefix)
{
	if (!np)
		return false;

	return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0;
}
EXPORT_SYMBOL(of_node_name_prefix);

/*
 * Return true if @np has a "device_type" property whose value equals @type.
 * NULL @np, a missing property, or NULL @type all yield false.
 */
static bool __of_node_is_type(const struct device_node *np, const char *type)
{
	const char *match = __of_get_property(np, "device_type", NULL);

	return np && match && type && !strcmp(match, type);
}

/*
 * Platforms on which a missing "#address-cells"/"#size-cells" property is
 * tolerated without a warning (legacy behavior they still depend on).
 */
#define EXCLUDED_DEFAULT_CELLS_PLATFORMS ( \
	IS_ENABLED(CONFIG_SPARC) \
)

/*
 * Walk up from @np looking for the nearest "#address-cells" property and
 * return its value; fall back to the architecture default if none is found.
 */
int of_bus_n_addr_cells(struct device_node *np)
{
	u32 cells;

	for (; np; np = np->parent) {
		if (!of_property_read_u32(np, "#address-cells", &cells))
			return cells;
		/*
		 * Default root value and walking parent nodes for "#address-cells"
		 * is deprecated. Any platforms which hit this warning should
		 * be added to the excluded list.
		 */
		WARN_ONCE(!EXCLUDED_DEFAULT_CELLS_PLATFORMS,
			  "Missing '#address-cells' in %pOF\n", np);
	}
	return OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
}

/*
 * Number of address cells that apply to @np's own "reg" property.
 * The #address-cells property on a node describes that node's children,
 * so the lookup starts at the parent.
 */
int of_n_addr_cells(struct device_node *np)
{
	if (np->parent)
		np = np->parent;

	return of_bus_n_addr_cells(np);
}
EXPORT_SYMBOL(of_n_addr_cells);

/*
 * Walk up from @np looking for the nearest "#size-cells" property and
 * return its value; fall back to the architecture default if none is found.
 */
int of_bus_n_size_cells(struct device_node *np)
{
	u32 cells;

	for (; np; np = np->parent) {
		if (!of_property_read_u32(np, "#size-cells", &cells))
			return cells;
		/*
		 * Default root value and walking parent nodes for "#size-cells"
		 * is deprecated. Any platforms which hit this warning should
		 * be added to the excluded list.
		 */
		WARN_ONCE(!EXCLUDED_DEFAULT_CELLS_PLATFORMS,
			  "Missing '#size-cells' in %pOF\n", np);
	}
	return OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
}

/*
 * Number of size cells that apply to @np's own "reg" property.
 * As with of_n_addr_cells(), the property lives on the parent.
 */
int of_n_size_cells(struct device_node *np)
{
	if (np->parent)
		np = np->parent;

	return of_bus_n_size_cells(np);
}
EXPORT_SYMBOL(of_n_size_cells);

#ifdef CONFIG_NUMA
/* Default node-to-NUMA-node mapping; architectures may override. */
int __weak of_node_to_nid(struct device_node *np)
{
	return NUMA_NO_NODE;
}
#endif

#define OF_PHANDLE_CACHE_BITS	7
#define OF_PHANDLE_CACHE_SZ	BIT(OF_PHANDLE_CACHE_BITS)

/* Direct-mapped cache of recent phandle -> device_node translations. */
static struct device_node *phandle_cache[OF_PHANDLE_CACHE_SZ];

static u32 of_phandle_cache_hash(phandle handle)
{
	return hash_32(handle, OF_PHANDLE_CACHE_BITS);
}

/*
 * Caller must hold devtree_lock.
 */
void __of_phandle_cache_inv_entry(phandle handle)
{
	u32 handle_hash;
	struct device_node *np;

	if (!handle)
		return;

	handle_hash = of_phandle_cache_hash(handle);

	/* drop the cache slot only if it still refers to this phandle */
	np = phandle_cache[handle_hash];
	if (np && handle == np->phandle)
		phandle_cache[handle_hash] = NULL;
}

void __init of_core_init(void)
{
	struct device_node *np;

	of_platform_register_reconfig_notifier();

	/* Create the kset, and register existing nodes */
	mutex_lock(&of_mutex);
	of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
	if (!of_kset) {
		mutex_unlock(&of_mutex);
		pr_err("failed to register existing nodes\n");
		return;
	}
	for_each_of_allnodes(np) {
		__of_attach_node_sysfs(np);
		/* pre-warm the phandle cache while we walk every node anyway */
		if (np->phandle && !phandle_cache[of_phandle_cache_hash(np->phandle)])
			phandle_cache[of_phandle_cache_hash(np->phandle)] = np;
	}
	mutex_unlock(&of_mutex);

	/* Symlink in /proc as required by userspace ABI */
	if (of_root)
		proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
}

209 static struct property *__of_find_property(const struct device_node *np, 210 const char *name, int *lenp) 211 { 212 struct property *pp; 213 214 if (!np) 215 return NULL; 216 217 for (pp = np->properties; pp; pp = pp->next) { 218 if (of_prop_cmp(pp->name, name) == 0) { 219 if (lenp) 220 *lenp = pp->length; 221 break; 222 } 223 } 224 225 return pp; 226 } 227 228 struct property *of_find_property(const struct device_node *np, 229 const char *name, 230 int *lenp) 231 { 232 struct property *pp; 233 unsigned long flags; 234 235 raw_spin_lock_irqsave(&devtree_lock, flags); 236 pp = __of_find_property(np, name, lenp); 237 raw_spin_unlock_irqrestore(&devtree_lock, flags); 238 239 return pp; 240 } 241 EXPORT_SYMBOL(of_find_property); 242 243 struct device_node *__of_find_all_nodes(struct device_node *prev) 244 { 245 struct device_node *np; 246 if (!prev) { 247 np = of_root; 248 } else if (prev->child) { 249 np = prev->child; 250 } else { 251 /* Walk back up looking for a sibling, or the end of the structure */ 252 np = prev; 253 while (np->parent && !np->sibling) 254 np = np->parent; 255 np = np->sibling; /* Might be null at the end of the tree */ 256 } 257 return np; 258 } 259 260 /** 261 * of_find_all_nodes - Get next node in global list 262 * @prev: Previous node or NULL to start iteration 263 * of_node_put() will be called on it 264 * 265 * Return: A node pointer with refcount incremented, use 266 * of_node_put() on it when done. 267 */ 268 struct device_node *of_find_all_nodes(struct device_node *prev) 269 { 270 struct device_node *np; 271 unsigned long flags; 272 273 raw_spin_lock_irqsave(&devtree_lock, flags); 274 np = __of_find_all_nodes(prev); 275 of_node_get(np); 276 of_node_put(prev); 277 raw_spin_unlock_irqrestore(&devtree_lock, flags); 278 return np; 279 } 280 EXPORT_SYMBOL(of_find_all_nodes); 281 282 /* 283 * Find a property with a given name for a given node 284 * and return the value. 
285 */ 286 const void *__of_get_property(const struct device_node *np, 287 const char *name, int *lenp) 288 { 289 const struct property *pp = __of_find_property(np, name, lenp); 290 291 return pp ? pp->value : NULL; 292 } 293 294 /* 295 * Find a property with a given name for a given node 296 * and return the value. 297 */ 298 const void *of_get_property(const struct device_node *np, const char *name, 299 int *lenp) 300 { 301 const struct property *pp = of_find_property(np, name, lenp); 302 303 return pp ? pp->value : NULL; 304 } 305 EXPORT_SYMBOL(of_get_property); 306 307 /** 308 * __of_device_is_compatible() - Check if the node matches given constraints 309 * @device: pointer to node 310 * @compat: required compatible string, NULL or "" for any match 311 * @type: required device_type value, NULL or "" for any match 312 * @name: required node name, NULL or "" for any match 313 * 314 * Checks if the given @compat, @type and @name strings match the 315 * properties of the given @device. A constraints can be skipped by 316 * passing NULL or an empty string as the constraint. 317 * 318 * Returns 0 for no match, and a positive integer on match. The return 319 * value is a relative score with larger values indicating better 320 * matches. The score is weighted for the most specific compatible value 321 * to get the highest score. Matching type is next, followed by matching 322 * name. Practically speaking, this results in the following priority 323 * order for matches: 324 * 325 * 1. specific compatible && type && name 326 * 2. specific compatible && type 327 * 3. specific compatible && name 328 * 4. specific compatible 329 * 5. general compatible && type && name 330 * 6. general compatible && type 331 * 7. general compatible && name 332 * 8. general compatible 333 * 9. type && name 334 * 10. type 335 * 11. 
name 336 */ 337 static int __of_device_is_compatible(const struct device_node *device, 338 const char *compat, const char *type, const char *name) 339 { 340 const struct property *prop; 341 const char *cp; 342 int index = 0, score = 0; 343 344 /* Compatible match has highest priority */ 345 if (compat && compat[0]) { 346 prop = __of_find_property(device, "compatible", NULL); 347 for (cp = of_prop_next_string(prop, NULL); cp; 348 cp = of_prop_next_string(prop, cp), index++) { 349 if (of_compat_cmp(cp, compat, strlen(compat)) == 0) { 350 score = INT_MAX/2 - (index << 2); 351 break; 352 } 353 } 354 if (!score) 355 return 0; 356 } 357 358 /* Matching type is better than matching name */ 359 if (type && type[0]) { 360 if (!__of_node_is_type(device, type)) 361 return 0; 362 score += 2; 363 } 364 365 /* Matching name is a bit better than not */ 366 if (name && name[0]) { 367 if (!of_node_name_eq(device, name)) 368 return 0; 369 score++; 370 } 371 372 return score; 373 } 374 375 /** Checks if the given "compat" string matches one of the strings in 376 * the device's "compatible" property 377 */ 378 int of_device_is_compatible(const struct device_node *device, 379 const char *compat) 380 { 381 unsigned long flags; 382 int res; 383 384 raw_spin_lock_irqsave(&devtree_lock, flags); 385 res = __of_device_is_compatible(device, compat, NULL, NULL); 386 raw_spin_unlock_irqrestore(&devtree_lock, flags); 387 return res; 388 } 389 EXPORT_SYMBOL(of_device_is_compatible); 390 391 /** Checks if the device is compatible with any of the entries in 392 * a NULL terminated array of strings. Returns the best match 393 * score or 0. 
394 */ 395 int of_device_compatible_match(const struct device_node *device, 396 const char *const *compat) 397 { 398 unsigned int tmp, score = 0; 399 400 if (!compat) 401 return 0; 402 403 while (*compat) { 404 tmp = of_device_is_compatible(device, *compat); 405 if (tmp > score) 406 score = tmp; 407 compat++; 408 } 409 410 return score; 411 } 412 EXPORT_SYMBOL_GPL(of_device_compatible_match); 413 414 /** 415 * of_machine_compatible_match - Test root of device tree against a compatible array 416 * @compats: NULL terminated array of compatible strings to look for in root node's compatible property. 417 * 418 * Returns true if the root node has any of the given compatible values in its 419 * compatible property. 420 */ 421 bool of_machine_compatible_match(const char *const *compats) 422 { 423 struct device_node *root; 424 int rc = 0; 425 426 root = of_find_node_by_path("/"); 427 if (root) { 428 rc = of_device_compatible_match(root, compats); 429 of_node_put(root); 430 } 431 432 return rc != 0; 433 } 434 EXPORT_SYMBOL(of_machine_compatible_match); 435 436 static bool __of_device_is_status(const struct device_node *device, 437 const char * const*strings) 438 { 439 const char *status; 440 int statlen; 441 442 if (!device) 443 return false; 444 445 status = __of_get_property(device, "status", &statlen); 446 if (status == NULL) 447 return false; 448 449 if (statlen > 0) { 450 while (*strings) { 451 unsigned int len = strlen(*strings); 452 453 if ((*strings)[len - 1] == '-') { 454 if (!strncmp(status, *strings, len)) 455 return true; 456 } else { 457 if (!strcmp(status, *strings)) 458 return true; 459 } 460 strings++; 461 } 462 } 463 464 return false; 465 } 466 467 /** 468 * __of_device_is_available - check if a device is available for use 469 * 470 * @device: Node to check for availability, with locks already held 471 * 472 * Return: True if the status property is absent or set to "okay" or "ok", 473 * false otherwise 474 */ 475 static bool __of_device_is_available(const 
struct device_node *device) 476 { 477 static const char * const ok[] = {"okay", "ok", NULL}; 478 479 if (!device) 480 return false; 481 482 return !__of_get_property(device, "status", NULL) || 483 __of_device_is_status(device, ok); 484 } 485 486 /** 487 * __of_device_is_reserved - check if a device is reserved 488 * 489 * @device: Node to check for availability, with locks already held 490 * 491 * Return: True if the status property is set to "reserved", false otherwise 492 */ 493 static bool __of_device_is_reserved(const struct device_node *device) 494 { 495 static const char * const reserved[] = {"reserved", NULL}; 496 497 return __of_device_is_status(device, reserved); 498 } 499 500 /** 501 * of_device_is_available - check if a device is available for use 502 * 503 * @device: Node to check for availability 504 * 505 * Return: True if the status property is absent or set to "okay" or "ok", 506 * false otherwise 507 */ 508 bool of_device_is_available(const struct device_node *device) 509 { 510 unsigned long flags; 511 bool res; 512 513 raw_spin_lock_irqsave(&devtree_lock, flags); 514 res = __of_device_is_available(device); 515 raw_spin_unlock_irqrestore(&devtree_lock, flags); 516 return res; 517 518 } 519 EXPORT_SYMBOL(of_device_is_available); 520 521 /** 522 * __of_device_is_fail - check if a device has status "fail" or "fail-..." 523 * 524 * @device: Node to check status for, with locks already held 525 * 526 * Return: True if the status property is set to "fail" or "fail-..." 
(for any 527 * error code suffix), false otherwise 528 */ 529 static bool __of_device_is_fail(const struct device_node *device) 530 { 531 static const char * const fail[] = {"fail", "fail-", NULL}; 532 533 return __of_device_is_status(device, fail); 534 } 535 536 /** 537 * of_device_is_big_endian - check if a device has BE registers 538 * 539 * @device: Node to check for endianness 540 * 541 * Return: True if the device has a "big-endian" property, or if the kernel 542 * was compiled for BE *and* the device has a "native-endian" property. 543 * Returns false otherwise. 544 * 545 * Callers would nominally use ioread32be/iowrite32be if 546 * of_device_is_big_endian() == true, or readl/writel otherwise. 547 */ 548 bool of_device_is_big_endian(const struct device_node *device) 549 { 550 if (of_property_read_bool(device, "big-endian")) 551 return true; 552 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) && 553 of_property_read_bool(device, "native-endian")) 554 return true; 555 return false; 556 } 557 EXPORT_SYMBOL(of_device_is_big_endian); 558 559 /** 560 * of_get_parent - Get a node's parent if any 561 * @node: Node to get parent 562 * 563 * Return: A node pointer with refcount incremented, use 564 * of_node_put() on it when done. 565 */ 566 struct device_node *of_get_parent(const struct device_node *node) 567 { 568 struct device_node *np; 569 unsigned long flags; 570 571 if (!node) 572 return NULL; 573 574 raw_spin_lock_irqsave(&devtree_lock, flags); 575 np = of_node_get(node->parent); 576 raw_spin_unlock_irqrestore(&devtree_lock, flags); 577 return np; 578 } 579 EXPORT_SYMBOL(of_get_parent); 580 581 /** 582 * of_get_next_parent - Iterate to a node's parent 583 * @node: Node to get parent of 584 * 585 * This is like of_get_parent() except that it drops the 586 * refcount on the passed node, making it suitable for iterating 587 * through a node's parents. 588 * 589 * Return: A node pointer with refcount incremented, use 590 * of_node_put() on it when done. 
591 */ 592 struct device_node *of_get_next_parent(struct device_node *node) 593 { 594 struct device_node *parent; 595 unsigned long flags; 596 597 if (!node) 598 return NULL; 599 600 raw_spin_lock_irqsave(&devtree_lock, flags); 601 parent = of_node_get(node->parent); 602 of_node_put(node); 603 raw_spin_unlock_irqrestore(&devtree_lock, flags); 604 return parent; 605 } 606 EXPORT_SYMBOL(of_get_next_parent); 607 608 static struct device_node *__of_get_next_child(const struct device_node *node, 609 struct device_node *prev) 610 { 611 struct device_node *next; 612 613 if (!node) 614 return NULL; 615 616 next = prev ? prev->sibling : node->child; 617 of_node_get(next); 618 of_node_put(prev); 619 return next; 620 } 621 #define __for_each_child_of_node(parent, child) \ 622 for (child = __of_get_next_child(parent, NULL); child != NULL; \ 623 child = __of_get_next_child(parent, child)) 624 625 /** 626 * of_get_next_child - Iterate a node childs 627 * @node: parent node 628 * @prev: previous child of the parent node, or NULL to get first 629 * 630 * Return: A node pointer with refcount incremented, use of_node_put() on 631 * it when done. Returns NULL when prev is the last child. Decrements the 632 * refcount of prev. 633 */ 634 struct device_node *of_get_next_child(const struct device_node *node, 635 struct device_node *prev) 636 { 637 struct device_node *next; 638 unsigned long flags; 639 640 raw_spin_lock_irqsave(&devtree_lock, flags); 641 next = __of_get_next_child(node, prev); 642 raw_spin_unlock_irqrestore(&devtree_lock, flags); 643 return next; 644 } 645 EXPORT_SYMBOL(of_get_next_child); 646 647 static struct device_node *of_get_next_status_child(const struct device_node *node, 648 struct device_node *prev, 649 bool (*checker)(const struct device_node *)) 650 { 651 struct device_node *next; 652 unsigned long flags; 653 654 if (!node) 655 return NULL; 656 657 raw_spin_lock_irqsave(&devtree_lock, flags); 658 next = prev ? 
prev->sibling : node->child; 659 for (; next; next = next->sibling) { 660 if (!checker(next)) 661 continue; 662 if (of_node_get(next)) 663 break; 664 } 665 of_node_put(prev); 666 raw_spin_unlock_irqrestore(&devtree_lock, flags); 667 return next; 668 } 669 670 /** 671 * of_get_next_available_child - Find the next available child node 672 * @node: parent node 673 * @prev: previous child of the parent node, or NULL to get first 674 * 675 * This function is like of_get_next_child(), except that it 676 * automatically skips any disabled nodes (i.e. status = "disabled"). 677 */ 678 struct device_node *of_get_next_available_child(const struct device_node *node, 679 struct device_node *prev) 680 { 681 return of_get_next_status_child(node, prev, __of_device_is_available); 682 } 683 EXPORT_SYMBOL(of_get_next_available_child); 684 685 /** 686 * of_get_next_reserved_child - Find the next reserved child node 687 * @node: parent node 688 * @prev: previous child of the parent node, or NULL to get first 689 * 690 * This function is like of_get_next_child(), except that it 691 * automatically skips any disabled nodes (i.e. status = "disabled"). 692 */ 693 struct device_node *of_get_next_reserved_child(const struct device_node *node, 694 struct device_node *prev) 695 { 696 return of_get_next_status_child(node, prev, __of_device_is_reserved); 697 } 698 EXPORT_SYMBOL(of_get_next_reserved_child); 699 700 /** 701 * of_get_next_cpu_node - Iterate on cpu nodes 702 * @prev: previous child of the /cpus node, or NULL to get first 703 * 704 * Unusable CPUs (those with the status property set to "fail" or "fail-...") 705 * will be skipped. 706 * 707 * Return: A cpu node pointer with refcount incremented, use of_node_put() 708 * on it when done. Returns NULL when prev is the last child. Decrements 709 * the refcount of prev. 
710 */ 711 struct device_node *of_get_next_cpu_node(struct device_node *prev) 712 { 713 struct device_node *next = NULL; 714 unsigned long flags; 715 struct device_node *node; 716 717 if (!prev) 718 node = of_find_node_by_path("/cpus"); 719 720 raw_spin_lock_irqsave(&devtree_lock, flags); 721 if (prev) 722 next = prev->sibling; 723 else if (node) { 724 next = node->child; 725 of_node_put(node); 726 } 727 for (; next; next = next->sibling) { 728 if (__of_device_is_fail(next)) 729 continue; 730 if (!(of_node_name_eq(next, "cpu") || 731 __of_node_is_type(next, "cpu"))) 732 continue; 733 if (of_node_get(next)) 734 break; 735 } 736 of_node_put(prev); 737 raw_spin_unlock_irqrestore(&devtree_lock, flags); 738 return next; 739 } 740 EXPORT_SYMBOL(of_get_next_cpu_node); 741 742 /** 743 * of_get_compatible_child - Find compatible child node 744 * @parent: parent node 745 * @compatible: compatible string 746 * 747 * Lookup child node whose compatible property contains the given compatible 748 * string. 749 * 750 * Return: a node pointer with refcount incremented, use of_node_put() on it 751 * when done; or NULL if not found. 752 */ 753 struct device_node *of_get_compatible_child(const struct device_node *parent, 754 const char *compatible) 755 { 756 struct device_node *child; 757 758 for_each_child_of_node(parent, child) { 759 if (of_device_is_compatible(child, compatible)) 760 break; 761 } 762 763 return child; 764 } 765 EXPORT_SYMBOL(of_get_compatible_child); 766 767 /** 768 * of_get_child_by_name - Find the child node by name for a given parent 769 * @node: parent node 770 * @name: child name to look for. 771 * 772 * This function looks for child node for given matching name 773 * 774 * Return: A node pointer if found, with refcount incremented, use 775 * of_node_put() on it when done. 776 * Returns NULL if node is not found. 
777 */ 778 struct device_node *of_get_child_by_name(const struct device_node *node, 779 const char *name) 780 { 781 struct device_node *child; 782 783 for_each_child_of_node(node, child) 784 if (of_node_name_eq(child, name)) 785 break; 786 return child; 787 } 788 EXPORT_SYMBOL(of_get_child_by_name); 789 790 struct device_node *__of_find_node_by_path(const struct device_node *parent, 791 const char *path) 792 { 793 struct device_node *child; 794 int len; 795 796 len = strcspn(path, "/:"); 797 if (!len) 798 return NULL; 799 800 __for_each_child_of_node(parent, child) { 801 const char *name = kbasename(child->full_name); 802 if (strncmp(path, name, len) == 0 && (strlen(name) == len)) 803 return child; 804 } 805 return NULL; 806 } 807 808 struct device_node *__of_find_node_by_full_path(struct device_node *node, 809 const char *path) 810 { 811 const char *separator = strchr(path, ':'); 812 813 while (node && *path == '/') { 814 struct device_node *tmp = node; 815 816 path++; /* Increment past '/' delimiter */ 817 node = __of_find_node_by_path(node, path); 818 of_node_put(tmp); 819 path = strchrnul(path, '/'); 820 if (separator && separator < path) 821 break; 822 } 823 return node; 824 } 825 826 /** 827 * of_find_node_opts_by_path - Find a node matching a full OF path 828 * @path: Either the full path to match, or if the path does not 829 * start with '/', the name of a property of the /aliases 830 * node (an alias). In the case of an alias, the node 831 * matching the alias' value will be returned. 832 * @opts: Address of a pointer into which to store the start of 833 * an options string appended to the end of the path with 834 * a ':' separator. 835 * 836 * Valid paths: 837 * * /foo/bar Full path 838 * * foo Valid alias 839 * * foo/bar Valid alias + relative path 840 * 841 * Return: A node pointer with refcount incremented, use 842 * of_node_put() on it when done. 
843 */ 844 struct device_node *of_find_node_opts_by_path(const char *path, const char **opts) 845 { 846 struct device_node *np = NULL; 847 const struct property *pp; 848 unsigned long flags; 849 const char *separator = strchr(path, ':'); 850 851 if (opts) 852 *opts = separator ? separator + 1 : NULL; 853 854 if (strcmp(path, "/") == 0) 855 return of_node_get(of_root); 856 857 /* The path could begin with an alias */ 858 if (*path != '/') { 859 int len; 860 const char *p = separator; 861 862 if (!p) 863 p = strchrnul(path, '/'); 864 len = p - path; 865 866 /* of_aliases must not be NULL */ 867 if (!of_aliases) 868 return NULL; 869 870 for_each_property_of_node(of_aliases, pp) { 871 if (strlen(pp->name) == len && !strncmp(pp->name, path, len)) { 872 np = of_find_node_by_path(pp->value); 873 break; 874 } 875 } 876 if (!np) 877 return NULL; 878 path = p; 879 } 880 881 /* Step down the tree matching path components */ 882 raw_spin_lock_irqsave(&devtree_lock, flags); 883 if (!np) 884 np = of_node_get(of_root); 885 np = __of_find_node_by_full_path(np, path); 886 raw_spin_unlock_irqrestore(&devtree_lock, flags); 887 return np; 888 } 889 EXPORT_SYMBOL(of_find_node_opts_by_path); 890 891 /** 892 * of_find_node_by_name - Find a node by its "name" property 893 * @from: The node to start searching from or NULL; the node 894 * you pass will not be searched, only the next one 895 * will. Typically, you pass what the previous call 896 * returned. of_node_put() will be called on @from. 897 * @name: The name string to match against 898 * 899 * Return: A node pointer with refcount incremented, use 900 * of_node_put() on it when done. 
901 */ 902 struct device_node *of_find_node_by_name(struct device_node *from, 903 const char *name) 904 { 905 struct device_node *np; 906 unsigned long flags; 907 908 raw_spin_lock_irqsave(&devtree_lock, flags); 909 for_each_of_allnodes_from(from, np) 910 if (of_node_name_eq(np, name) && of_node_get(np)) 911 break; 912 of_node_put(from); 913 raw_spin_unlock_irqrestore(&devtree_lock, flags); 914 return np; 915 } 916 EXPORT_SYMBOL(of_find_node_by_name); 917 918 /** 919 * of_find_node_by_type - Find a node by its "device_type" property 920 * @from: The node to start searching from, or NULL to start searching 921 * the entire device tree. The node you pass will not be 922 * searched, only the next one will; typically, you pass 923 * what the previous call returned. of_node_put() will be 924 * called on from for you. 925 * @type: The type string to match against 926 * 927 * Return: A node pointer with refcount incremented, use 928 * of_node_put() on it when done. 929 */ 930 struct device_node *of_find_node_by_type(struct device_node *from, 931 const char *type) 932 { 933 struct device_node *np; 934 unsigned long flags; 935 936 raw_spin_lock_irqsave(&devtree_lock, flags); 937 for_each_of_allnodes_from(from, np) 938 if (__of_node_is_type(np, type) && of_node_get(np)) 939 break; 940 of_node_put(from); 941 raw_spin_unlock_irqrestore(&devtree_lock, flags); 942 return np; 943 } 944 EXPORT_SYMBOL(of_find_node_by_type); 945 946 /** 947 * of_find_compatible_node - Find a node based on type and one of the 948 * tokens in its "compatible" property 949 * @from: The node to start searching from or NULL, the node 950 * you pass will not be searched, only the next one 951 * will; typically, you pass what the previous call 952 * returned. of_node_put() will be called on it 953 * @type: The type string to match "device_type" or NULL to ignore 954 * @compatible: The string to match to one of the tokens in the device 955 * "compatible" list. 
956 * 957 * Return: A node pointer with refcount incremented, use 958 * of_node_put() on it when done. 959 */ 960 struct device_node *of_find_compatible_node(struct device_node *from, 961 const char *type, const char *compatible) 962 { 963 struct device_node *np; 964 unsigned long flags; 965 966 raw_spin_lock_irqsave(&devtree_lock, flags); 967 for_each_of_allnodes_from(from, np) 968 if (__of_device_is_compatible(np, compatible, type, NULL) && 969 of_node_get(np)) 970 break; 971 of_node_put(from); 972 raw_spin_unlock_irqrestore(&devtree_lock, flags); 973 return np; 974 } 975 EXPORT_SYMBOL(of_find_compatible_node); 976 977 /** 978 * of_find_node_with_property - Find a node which has a property with 979 * the given name. 980 * @from: The node to start searching from or NULL, the node 981 * you pass will not be searched, only the next one 982 * will; typically, you pass what the previous call 983 * returned. of_node_put() will be called on it 984 * @prop_name: The name of the property to look for. 985 * 986 * Return: A node pointer with refcount incremented, use 987 * of_node_put() on it when done. 
988 */ 989 struct device_node *of_find_node_with_property(struct device_node *from, 990 const char *prop_name) 991 { 992 struct device_node *np; 993 const struct property *pp; 994 unsigned long flags; 995 996 raw_spin_lock_irqsave(&devtree_lock, flags); 997 for_each_of_allnodes_from(from, np) { 998 for (pp = np->properties; pp; pp = pp->next) { 999 if (of_prop_cmp(pp->name, prop_name) == 0) { 1000 of_node_get(np); 1001 goto out; 1002 } 1003 } 1004 } 1005 out: 1006 of_node_put(from); 1007 raw_spin_unlock_irqrestore(&devtree_lock, flags); 1008 return np; 1009 } 1010 EXPORT_SYMBOL(of_find_node_with_property); 1011 1012 static 1013 const struct of_device_id *__of_match_node(const struct of_device_id *matches, 1014 const struct device_node *node) 1015 { 1016 const struct of_device_id *best_match = NULL; 1017 int score, best_score = 0; 1018 1019 if (!matches) 1020 return NULL; 1021 1022 for (; matches->name[0] || matches->type[0] || matches->compatible[0]; matches++) { 1023 score = __of_device_is_compatible(node, matches->compatible, 1024 matches->type, matches->name); 1025 if (score > best_score) { 1026 best_match = matches; 1027 best_score = score; 1028 } 1029 } 1030 1031 return best_match; 1032 } 1033 1034 /** 1035 * of_match_node - Tell if a device_node has a matching of_match structure 1036 * @matches: array of of device match structures to search in 1037 * @node: the of device structure to match against 1038 * 1039 * Low level utility function used by device matching. 
1040 */ 1041 const struct of_device_id *of_match_node(const struct of_device_id *matches, 1042 const struct device_node *node) 1043 { 1044 const struct of_device_id *match; 1045 unsigned long flags; 1046 1047 raw_spin_lock_irqsave(&devtree_lock, flags); 1048 match = __of_match_node(matches, node); 1049 raw_spin_unlock_irqrestore(&devtree_lock, flags); 1050 return match; 1051 } 1052 EXPORT_SYMBOL(of_match_node); 1053 1054 /** 1055 * of_find_matching_node_and_match - Find a node based on an of_device_id 1056 * match table. 1057 * @from: The node to start searching from or NULL, the node 1058 * you pass will not be searched, only the next one 1059 * will; typically, you pass what the previous call 1060 * returned. of_node_put() will be called on it 1061 * @matches: array of of device match structures to search in 1062 * @match: Updated to point at the matches entry which matched 1063 * 1064 * Return: A node pointer with refcount incremented, use 1065 * of_node_put() on it when done. 1066 */ 1067 struct device_node *of_find_matching_node_and_match(struct device_node *from, 1068 const struct of_device_id *matches, 1069 const struct of_device_id **match) 1070 { 1071 struct device_node *np; 1072 const struct of_device_id *m; 1073 unsigned long flags; 1074 1075 if (match) 1076 *match = NULL; 1077 1078 raw_spin_lock_irqsave(&devtree_lock, flags); 1079 for_each_of_allnodes_from(from, np) { 1080 m = __of_match_node(matches, np); 1081 if (m && of_node_get(np)) { 1082 if (match) 1083 *match = m; 1084 break; 1085 } 1086 } 1087 of_node_put(from); 1088 raw_spin_unlock_irqrestore(&devtree_lock, flags); 1089 return np; 1090 } 1091 EXPORT_SYMBOL(of_find_matching_node_and_match); 1092 1093 /** 1094 * of_alias_from_compatible - Lookup appropriate alias for a device node 1095 * depending on compatible 1096 * @node: pointer to a device tree node 1097 * @alias: Pointer to buffer that alias value will be copied into 1098 * @len: Length of alias value 1099 * 1100 * Based on the value of the 
compatible property, this routine will attempt 1101 * to choose an appropriate alias value for a particular device tree node. 1102 * It does this by stripping the manufacturer prefix (as delimited by a ',') 1103 * from the first entry in the compatible list property. 1104 * 1105 * Note: The matching on just the "product" side of the compatible is a relic 1106 * from I2C and SPI. Please do not add any new user. 1107 * 1108 * Return: This routine returns 0 on success, <0 on failure. 1109 */ 1110 int of_alias_from_compatible(const struct device_node *node, char *alias, int len) 1111 { 1112 const char *compatible, *p; 1113 int cplen; 1114 1115 compatible = of_get_property(node, "compatible", &cplen); 1116 if (!compatible || strlen(compatible) > cplen) 1117 return -ENODEV; 1118 p = strchr(compatible, ','); 1119 strscpy(alias, p ? p + 1 : compatible, len); 1120 return 0; 1121 } 1122 EXPORT_SYMBOL_GPL(of_alias_from_compatible); 1123 1124 /** 1125 * of_find_node_by_phandle - Find a node given a phandle 1126 * @handle: phandle of the node to find 1127 * 1128 * Return: A node pointer with refcount incremented, use 1129 * of_node_put() on it when done. 
 */
struct device_node *of_find_node_by_phandle(phandle handle)
{
	struct device_node *np = NULL;
	unsigned long flags;
	u32 handle_hash;

	if (!handle)
		return NULL;

	handle_hash = of_phandle_cache_hash(handle);

	raw_spin_lock_irqsave(&devtree_lock, flags);

	/* Fast path: the hashed cache slot may already hold this phandle. */
	if (phandle_cache[handle_hash] &&
	    handle == phandle_cache[handle_hash]->phandle)
		np = phandle_cache[handle_hash];

	/* Slow path: linear scan of all nodes, then refill the cache slot. */
	if (!np) {
		for_each_of_allnodes(np)
			if (np->phandle == handle &&
			    !of_node_check_flag(np, OF_DETACHED)) {
				phandle_cache[handle_hash] = np;
				break;
			}
	}

	of_node_get(np);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);

/* Print a phandle-args tuple as "<msg> <node>:<arg0>,<arg1>,..." */
void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
{
	int i;
	printk("%s %pOF", msg, args->np);
	for (i = 0; i < args->args_count; i++) {
		const char delim = i ? ',' : ':';

		pr_cont("%c%08x", delim, args->args[i]);
	}
	pr_cont("\n");
}

int of_phandle_iterator_init(struct of_phandle_iterator *it,
		const struct device_node *np,
		const char *list_name,
		const char *cells_name,
		int cell_count)
{
	const __be32 *list;
	int size;

	memset(it, 0, sizeof(*it));

	/*
	 * one of cell_count or cells_name must be provided to determine the
	 * argument length.
	 */
	if (cell_count < 0 && !cells_name)
		return -EINVAL;

	list = of_get_property(np, list_name, &size);
	if (!list)
		return -ENOENT;

	/* Position the cursors at the start of the raw cell list. */
	it->cells_name = cells_name;
	it->cell_count = cell_count;
	it->parent = np;
	it->list_end = list + size / sizeof(*list);
	it->phandle_end = list;
	it->cur = list;

	return 0;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_init);

int of_phandle_iterator_next(struct of_phandle_iterator *it)
{
	uint32_t count = 0;

	/* Release the node reference taken for the previous entry. */
	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	if (!it->cur || it->phandle_end >= it->list_end)
		return -ENOENT;

	it->cur = it->phandle_end;

	/* If phandle is 0, then it is an empty entry with no arguments. */
	it->phandle = be32_to_cpup(it->cur++);

	if (it->phandle) {

		/*
		 * Find the provider node and parse the #*-cells property to
		 * determine the argument length.
		 */
		it->node = of_find_node_by_phandle(it->phandle);

		if (it->cells_name) {
			if (!it->node) {
				pr_err("%pOF: could not find phandle %d\n",
				       it->parent, it->phandle);
				goto err;
			}

			if (of_property_read_u32(it->node, it->cells_name,
						 &count)) {
				/*
				 * If both cell_count and cells_name is given,
				 * fall back to cell_count in absence
				 * of the cells_name property
				 */
				if (it->cell_count >= 0) {
					count = it->cell_count;
				} else {
					pr_err("%pOF: could not get %s for %pOF\n",
					       it->parent,
					       it->cells_name,
					       it->node);
					goto err;
				}
			}
		} else {
			count = it->cell_count;
		}

		/*
		 * Make sure that the arguments actually fit in the remaining
		 * property data length
		 */
		if (it->cur + count > it->list_end) {
			if (it->cells_name)
				pr_err("%pOF: %s = %d found %td\n",
				       it->parent, it->cells_name,
				       count, it->list_end - it->cur);
			else
				pr_err("%pOF: phandle %s needs %d, found %td\n",
				       it->parent, of_node_full_name(it->node),
				       count, it->list_end - it->cur);
			goto err;
		}
	}

	/* Remember where this entry's argument cells end. */
	it->phandle_end = it->cur + count;
	it->cur_count = count;

	return 0;

err:
	/* On error, drop the provider reference so the caller need not. */
	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_next);

/* Copy at most @size argument cells of the current entry into @args. */
int of_phandle_iterator_args(struct of_phandle_iterator *it,
			     uint32_t *args,
			     int size)
{
	int i, count;

	count = it->cur_count;

	if (WARN_ON(size < count))
		count = size;

	for (i = 0; i < count; i++)
		args[i] = be32_to_cpup(it->cur++);

	return count;
}

int __of_parse_phandle_with_args(const struct device_node *np,
				 const char *list_name,
				 const char *cells_name,
				 int cell_count, int index,
				 struct of_phandle_args *out_args)
{
	struct of_phandle_iterator it;
	int rc, cur_index = 0;

	if (index < 0)
		return -EINVAL;

	/* Loop over the phandles until all the requested entry is found */
	of_for_each_phandle(&it, rc, np, list_name, cells_name, cell_count) {
		/*
		 * All of the error cases bail out of the loop, so at
		 * this point, the parsing is successful. If the requested
		 * index matches, then fill the out_args structure and return,
		 * or return -ENOENT for an empty entry.
		 */
		rc = -ENOENT;
		if (cur_index == index) {
			if (!it.phandle)
				goto err;

			if (out_args) {
				int c;

				c = of_phandle_iterator_args(&it,
							     out_args->args,
							     MAX_PHANDLE_ARGS);
				/* Ownership of it.node's ref moves to out_args->np. */
				out_args->np = it.node;
				out_args->args_count = c;
			} else {
				of_node_put(it.node);
			}

			/* Found it! return success */
			return 0;
		}

		cur_index++;
	}

	/*
	 * Unlock node before returning result; will be one of:
	 * -ENOENT : index is for empty phandle
	 * -EINVAL : parsing error on data
	 */

err:
	of_node_put(it.node);
	return rc;
}
EXPORT_SYMBOL(__of_parse_phandle_with_args);

/**
 * of_parse_phandle_with_args_map() - Find a node pointed by phandle in a list and remap it
 * @np:		pointer to a device tree node containing a list
 * @list_name:	property name that contains a list
 * @stem_name:	stem of property names that specify phandles' arguments count
 * @index:	index of a phandle to parse out
 * @out_args:	optional pointer to output arguments structure (will be filled)
 *
 * This function is useful to parse lists of phandles and their arguments.
 * Returns 0 on success and fills out_args, on error returns appropriate errno
 * value. The difference between this function and of_parse_phandle_with_args()
 * is that this API remaps a phandle if the node the phandle points to has
 * a <@stem_name>-map property.
 *
 * Caller is responsible to call of_node_put() on the returned out_args->np
 * pointer.
 *
 * Example::
 *
 *	phandle1: node1 {
 *		#list-cells = <2>;
 *	};
 *
 *	phandle2: node2 {
 *		#list-cells = <1>;
 *	};
 *
 *	phandle3: node3 {
 *		#list-cells = <1>;
 *		list-map = <0 &phandle2 3>,
 *			   <1 &phandle2 2>,
 *			   <2 &phandle1 5 1>;
 *		list-map-mask = <0x3>;
 *	};
 *
 *	node4 {
 *		list = <&phandle1 1 2 &phandle3 0>;
 *	};
 *
 * To get a device_node of the ``node2`` node you may call this:
 * of_parse_phandle_with_args(node4, "list", "list", 1, &args);
 */
int of_parse_phandle_with_args_map(const struct device_node *np,
				   const char *list_name,
				   const char *stem_name,
				   int index, struct of_phandle_args *out_args)
{
	char *cells_name __free(kfree) = kasprintf(GFP_KERNEL, "#%s-cells", stem_name);
	char *map_name __free(kfree) = kasprintf(GFP_KERNEL, "%s-map", stem_name);
	char *mask_name __free(kfree) = kasprintf(GFP_KERNEL, "%s-map-mask", stem_name);
	char *pass_name __free(kfree) = kasprintf(GFP_KERNEL, "%s-map-pass-thru", stem_name);
	struct device_node *cur, *new = NULL;
	const __be32 *map, *mask, *pass;
	/* Fallbacks when the optional mask/pass-thru properties are absent:
	 * match all bits, pass through no bits. */
	static const __be32 dummy_mask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) };
	static const __be32 dummy_pass[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(0) };
	__be32 initial_match_array[MAX_PHANDLE_ARGS];
	const __be32 *match_array = initial_match_array;
	int i, ret, map_len, match;
	u32 list_size, new_size;

	if (index < 0)
		return -EINVAL;

	if (!cells_name || !map_name || !mask_name || !pass_name)
		return -ENOMEM;

	ret = __of_parse_phandle_with_args(np, list_name, cells_name, -1, index,
					   out_args);
	if (ret)
		return ret;

	/* Get the #<list>-cells property */
	cur = out_args->np;
	ret = of_property_read_u32(cur, cells_name, &list_size);
	if (ret < 0)
		goto put;

	/* Precalculate the match array - this simplifies match loop */
	for (i = 0; i < list_size; i++)
		initial_match_array[i] = cpu_to_be32(out_args->args[i]);

	ret = -EINVAL;
	/* Keep translating until a provider without a <list>-map is reached. */
	while (cur) {
		/* Get the <list>-map property */
		map = of_get_property(cur, map_name, &map_len);
		if (!map) {
			/* No map: out_args already holds the final answer. */
			return 0;
		}
		map_len /= sizeof(u32);

		/* Get the <list>-map-mask property (optional) */
		mask = of_get_property(cur, mask_name, NULL);
		if (!mask)
			mask = dummy_mask;
		/* Iterate through <list>-map property */
		match = 0;
		while (map_len > (list_size + 1) && !match) {
			/* Compare specifiers */
			match = 1;
			for (i = 0; i < list_size; i++, map_len--)
				match &= !((match_array[i] ^ *map++) & mask[i]);

			of_node_put(new);
			new = of_find_node_by_phandle(be32_to_cpup(map));
			map++;
			map_len--;

			/* Check if not found */
			if (!new)
				goto put;

			if (!of_device_is_available(new))
				match = 0;

			ret = of_property_read_u32(new, cells_name, &new_size);
			if (ret)
				goto put;

			/* Check for malformed properties */
			if (WARN_ON(new_size > MAX_PHANDLE_ARGS))
				goto put;
			if (map_len < new_size)
				goto put;

			/* Move forward by new node's #<list>-cells amount */
			map += new_size;
			map_len -= new_size;
		}
		if (!match)
			goto put;

		/* Get the <list>-map-pass-thru property (optional) */
		pass = of_get_property(cur, pass_name, NULL);
		if (!pass)
			pass = dummy_pass;

		/*
		 * Successfully parsed a <list>-map translation; copy new
		 * specifier into the out_args structure, keeping the
		 * bits specified in <list>-map-pass-thru.
		 */
		match_array = map - new_size;
		for (i = 0; i < new_size; i++) {
			__be32 val = *(map - new_size + i);

			if (i < list_size) {
				val &= ~pass[i];
				val |= cpu_to_be32(out_args->args[i]) & pass[i];
			}

			out_args->args[i] = be32_to_cpu(val);
		}
		out_args->args_count = list_size = new_size;
		/* Iterate again with new provider */
		out_args->np = new;
		of_node_put(cur);
		cur = new;
		new = NULL;
	}
put:
	of_node_put(cur);
	of_node_put(new);
	return ret;
}
EXPORT_SYMBOL(of_parse_phandle_with_args_map);

/**
 * of_count_phandle_with_args() - Find the number of phandles references in a property
 * @np:		pointer to a device tree node containing a list
 * @list_name:	property name that contains a list
 * @cells_name:	property name that specifies phandles' arguments count
 *
 * Return: The number of phandle + argument tuples within a property. It
 * is a typical pattern to encode a list of phandle and variable
 * arguments into a single property. The number of arguments is encoded
 * by a property in the phandle-target node. For example, a gpios
 * property would contain a list of GPIO specifies consisting of a
 * phandle and 1 or more arguments. The number of arguments are
 * determined by the #gpio-cells property in the node pointed to by the
 * phandle.
 */
int of_count_phandle_with_args(const struct device_node *np, const char *list_name,
			       const char *cells_name)
{
	struct of_phandle_iterator it;
	int rc, cur_index = 0;

	/*
	 * If cells_name is NULL we assume a cell count of 0. This makes
	 * counting the phandles trivial as each 32bit word in the list is a
	 * phandle and no arguments are to consider. So we don't iterate through
	 * the list but just use the length to determine the phandle count.
	 */
	if (!cells_name) {
		const __be32 *list;
		int size;

		list = of_get_property(np, list_name, &size);
		if (!list)
			return -ENOENT;

		return size / sizeof(*list);
	}

	rc = of_phandle_iterator_init(&it, np, list_name, cells_name, -1);
	if (rc)
		return rc;

	/* Count entries until the iterator runs out (or errors). */
	while ((rc = of_phandle_iterator_next(&it)) == 0)
		cur_index += 1;

	/* -ENOENT is the normal end-of-list; anything else is a real error. */
	if (rc != -ENOENT)
		return rc;

	return cur_index;
}
EXPORT_SYMBOL(of_count_phandle_with_args);

/*
 * Unlink @prop from the singly-linked @list. Returns @prop if it was found
 * and removed, NULL otherwise. Caller must hold devtree_lock.
 */
static struct property *__of_remove_property_from_list(struct property **list, struct property *prop)
{
	struct property **next;

	for (next = list; *next; next = &(*next)->next) {
		if (*next == prop) {
			*next = prop->next;
			prop->next = NULL;
			return prop;
		}
	}
	return NULL;
}

/**
 * __of_add_property - Add a property to a node without lock operations
 * @np:		Caller's Device Node
 * @prop:	Property to add
 */
int __of_add_property(struct device_node *np, struct property *prop)
{
	int rc = 0;
	unsigned long flags;
	struct property **next;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	/* If the property was previously "removed", resurrect it. */
	__of_remove_property_from_list(&np->deadprops, prop);

	prop->next = NULL;
	next = &np->properties;
	while (*next) {
		if (strcmp(prop->name, (*next)->name) == 0) {
			/* duplicate !
			   don't insert it */
			rc = -EEXIST;
			goto out_unlock;
		}
		next = &(*next)->next;
	}
	*next = prop;

out_unlock:
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	if (rc)
		return rc;

	/* Publish to sysfs outside the raw spinlock. */
	__of_add_property_sysfs(np, prop);
	return 0;
}

/**
 * of_add_property - Add a property to a node
 * @np:		Caller's Device Node
 * @prop:	Property to add
 */
int of_add_property(struct device_node *np, struct property *prop)
{
	int rc;

	mutex_lock(&of_mutex);
	rc = __of_add_property(np, prop);
	mutex_unlock(&of_mutex);

	if (!rc)
		of_property_notify(OF_RECONFIG_ADD_PROPERTY, np, prop, NULL);

	return rc;
}
EXPORT_SYMBOL_GPL(of_add_property);

int __of_remove_property(struct device_node *np, struct property *prop)
{
	unsigned long flags;
	int rc = -ENODEV;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	if (__of_remove_property_from_list(&np->properties, prop)) {
		/* Found the property, add it to deadprops list */
		prop->next = np->deadprops;
		np->deadprops = prop;
		rc = 0;
	}

	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	if (rc)
		return rc;

	__of_remove_property_sysfs(np, prop);
	return 0;
}

/**
 * of_remove_property - Remove a property from a node.
 * @np:		Caller's Device Node
 * @prop:	Property to remove
 *
 * Note that we don't actually remove it, since we have given out
 * who-knows-how-many pointers to the data using get-property.
 * Instead we just move the property to the "dead properties"
 * list, so it won't be found any more.
 */
int of_remove_property(struct device_node *np, struct property *prop)
{
	int rc;

	if (!prop)
		return -ENODEV;

	mutex_lock(&of_mutex);
	rc = __of_remove_property(np, prop);
	mutex_unlock(&of_mutex);

	if (!rc)
		of_property_notify(OF_RECONFIG_REMOVE_PROPERTY, np, prop, NULL);

	return rc;
}
EXPORT_SYMBOL_GPL(of_remove_property);

int __of_update_property(struct device_node *np, struct property *newprop,
			 struct property **oldpropp)
{
	struct property **next, *oldprop;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	/* If newprop was previously "removed", resurrect it first. */
	__of_remove_property_from_list(&np->deadprops, newprop);

	for (next = &np->properties; *next; next = &(*next)->next) {
		if (of_prop_cmp((*next)->name, newprop->name) == 0)
			break;
	}
	*oldpropp = oldprop = *next;

	if (oldprop) {
		/* replace the node */
		newprop->next = oldprop->next;
		*next = newprop;
		oldprop->next = np->deadprops;
		np->deadprops = oldprop;
	} else {
		/* new node */
		newprop->next = NULL;
		*next = newprop;
	}

	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	__of_update_property_sysfs(np, newprop, oldprop);

	return 0;
}

/*
 * of_update_property - Update a property in a node, if the property does
 * not exist, add it.
 *
 * Note that we don't actually remove it, since we have given out
 * who-knows-how-many pointers to the data using get-property.
 * Instead we just move the property to the "dead properties" list,
 * and add the new property to the property list
 */
int of_update_property(struct device_node *np, struct property *newprop)
{
	struct property *oldprop;
	int rc;

	if (!newprop->name)
		return -EINVAL;

	mutex_lock(&of_mutex);
	rc = __of_update_property(np, newprop, &oldprop);
	mutex_unlock(&of_mutex);

	if (!rc)
		of_property_notify(OF_RECONFIG_UPDATE_PROPERTY, np, newprop, oldprop);

	return rc;
}

/* Fill in @ap and append it to the global aliases_lookup list. */
static void of_alias_add(struct alias_prop *ap, struct device_node *np,
			 int id, const char *stem, int stem_len)
{
	ap->np = np;
	ap->id = id;
	strscpy(ap->stem, stem, stem_len + 1);
	list_add_tail(&ap->link, &aliases_lookup);
	pr_debug("adding DT alias:%s: stem=%s id=%i node=%pOF\n",
		 ap->alias, ap->stem, ap->id, np);
}

/**
 * of_alias_scan - Scan all properties of the 'aliases' node
 * @dt_alloc:	An allocator that provides a virtual address to memory
 *		for storing the resulting tree
 *
 * The function scans all the properties of the 'aliases' node and populates
 * the global lookup table with the properties.  It returns nothing;
 * alias entries that fail to parse or allocate are silently skipped
1785 */ 1786 void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)) 1787 { 1788 const struct property *pp; 1789 1790 of_aliases = of_find_node_by_path("/aliases"); 1791 of_chosen = of_find_node_by_path("/chosen"); 1792 if (of_chosen == NULL) 1793 of_chosen = of_find_node_by_path("/chosen@0"); 1794 1795 if (of_chosen) { 1796 /* linux,stdout-path and /aliases/stdout are for legacy compatibility */ 1797 const char *name = NULL; 1798 1799 if (of_property_read_string(of_chosen, "stdout-path", &name)) 1800 of_property_read_string(of_chosen, "linux,stdout-path", 1801 &name); 1802 if (IS_ENABLED(CONFIG_PPC) && !name) 1803 of_property_read_string(of_aliases, "stdout", &name); 1804 if (name) 1805 of_stdout = of_find_node_opts_by_path(name, &of_stdout_options); 1806 if (of_stdout) 1807 of_stdout->fwnode.flags |= FWNODE_FLAG_BEST_EFFORT; 1808 } 1809 1810 if (!of_aliases) 1811 return; 1812 1813 for_each_property_of_node(of_aliases, pp) { 1814 const char *start = pp->name; 1815 const char *end = start + strlen(start); 1816 struct device_node *np; 1817 struct alias_prop *ap; 1818 int id, len; 1819 1820 /* Skip those we do not want to proceed */ 1821 if (!strcmp(pp->name, "name") || 1822 !strcmp(pp->name, "phandle") || 1823 !strcmp(pp->name, "linux,phandle")) 1824 continue; 1825 1826 np = of_find_node_by_path(pp->value); 1827 if (!np) 1828 continue; 1829 1830 /* walk the alias backwards to extract the id and work out 1831 * the 'stem' string */ 1832 while (isdigit(*(end-1)) && end > start) 1833 end--; 1834 len = end - start; 1835 1836 if (kstrtoint(end, 10, &id) < 0) 1837 continue; 1838 1839 /* Allocate an alias_prop with enough space for the stem */ 1840 ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap)); 1841 if (!ap) 1842 continue; 1843 memset(ap, 0, sizeof(*ap) + len + 1); 1844 ap->alias = start; 1845 of_alias_add(ap, np, id, start, len); 1846 } 1847 } 1848 1849 /** 1850 * of_alias_get_id - Get alias id for the given device_node 1851 * @np: Pointer to the given 
device_node 1852 * @stem: Alias stem of the given device_node 1853 * 1854 * The function travels the lookup table to get the alias id for the given 1855 * device_node and alias stem. 1856 * 1857 * Return: The alias id if found. 1858 */ 1859 int of_alias_get_id(const struct device_node *np, const char *stem) 1860 { 1861 struct alias_prop *app; 1862 int id = -ENODEV; 1863 1864 mutex_lock(&of_mutex); 1865 list_for_each_entry(app, &aliases_lookup, link) { 1866 if (strcmp(app->stem, stem) != 0) 1867 continue; 1868 1869 if (np == app->np) { 1870 id = app->id; 1871 break; 1872 } 1873 } 1874 mutex_unlock(&of_mutex); 1875 1876 return id; 1877 } 1878 EXPORT_SYMBOL_GPL(of_alias_get_id); 1879 1880 /** 1881 * of_alias_get_highest_id - Get highest alias id for the given stem 1882 * @stem: Alias stem to be examined 1883 * 1884 * The function travels the lookup table to get the highest alias id for the 1885 * given alias stem. It returns the alias id if found. 1886 */ 1887 int of_alias_get_highest_id(const char *stem) 1888 { 1889 struct alias_prop *app; 1890 int id = -ENODEV; 1891 1892 mutex_lock(&of_mutex); 1893 list_for_each_entry(app, &aliases_lookup, link) { 1894 if (strcmp(app->stem, stem) != 0) 1895 continue; 1896 1897 if (app->id > id) 1898 id = app->id; 1899 } 1900 mutex_unlock(&of_mutex); 1901 1902 return id; 1903 } 1904 EXPORT_SYMBOL_GPL(of_alias_get_highest_id); 1905 1906 /** 1907 * of_console_check() - Test and setup console for DT setup 1908 * @dn: Pointer to device node 1909 * @name: Name to use for preferred console without index. ex. "ttyS" 1910 * @index: Index to use for preferred console. 1911 * 1912 * Check if the given device node matches the stdout-path property in the 1913 * /chosen node. If it does then register it as the preferred console. 1914 * 1915 * Return: TRUE if console successfully setup. Otherwise return FALSE. 
 */
bool of_console_check(const struct device_node *dn, char *name, int index)
{
	/* Only the node recorded as of_stdout can become the console. */
	if (!dn || dn != of_stdout || console_set_on_cmdline)
		return false;

	/*
	 * XXX: cast `options' to char pointer to suppress complication
	 * warnings: printk, UART and console drivers expect char pointer.
	 */
	return !add_preferred_console(name, index, (char *)of_stdout_options);
}
EXPORT_SYMBOL_GPL(of_console_check);

/**
 * of_find_next_cache_node - Find a node's subsidiary cache
 * @np:	node of type "cpu" or "cache"
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done. Caller should hold a reference
 * to np.
 */
struct device_node *of_find_next_cache_node(const struct device_node *np)
{
	struct device_node *child, *cache_node;

	/* "l2-cache" is preferred; "next-level-cache" is the fallback name. */
	cache_node = of_parse_phandle(np, "l2-cache", 0);
	if (!cache_node)
		cache_node = of_parse_phandle(np, "next-level-cache", 0);

	if (cache_node)
		return cache_node;

	/* OF on pmac has nodes instead of properties named "l2-cache"
	 * beneath CPU nodes.
	 */
	if (IS_ENABLED(CONFIG_PPC_PMAC) && of_node_is_type(np, "cpu"))
		for_each_child_of_node(np, child)
			if (of_node_is_type(child, "cache"))
				return child;

	return NULL;
}

/**
 * of_find_last_cache_level - Find the level at which the last cache is
 *		present for the given logical cpu
 *
 * @cpu: cpu number(logical index) for which the last cache level is needed
 *
 * Return: The level at which the last cache is present. It is exactly
 * same as the total number of cache levels for the given logical cpu.
 */
int of_find_last_cache_level(unsigned int cpu)
{
	u32 cache_level = 0;
	struct device_node *prev = NULL, *np = of_cpu_device_node_get(cpu);

	/* Follow the cache chain to its last node, dropping refs as we go. */
	while (np) {
		of_node_put(prev);
		prev = np;
		np = of_find_next_cache_node(np);
	}

	/* cache_level stays 0 if the property is absent or prev is NULL. */
	of_property_read_u32(prev, "cache-level", &cache_level);
	of_node_put(prev);

	return cache_level;
}

/**
 * of_map_id - Translate an ID through a downstream mapping.
 * @np: root complex device node.
 * @id: device ID to map.
 * @map_name: property name of the map to use.
 * @map_mask_name: optional property name of the mask to use.
 * @target: optional pointer to a target device node.
 * @id_out: optional pointer to receive the translated ID.
 *
 * Given a device ID, look up the appropriate implementation-defined
 * platform ID and/or the target device which receives transactions on that
 * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
 * @id_out may be NULL if only the other is required. If @target points to
 * a non-NULL device node pointer, only entries targeting that node will be
 * matched; if it points to a NULL value, it will receive the device node of
 * the first matching target phandle, with a reference held.
 *
 * Return: 0 on success or a standard error code on failure.
 */
int of_map_id(const struct device_node *np, u32 id,
	      const char *map_name, const char *map_mask_name,
	      struct device_node **target, u32 *id_out)
{
	u32 map_mask, masked_id;
	int map_len;
	const __be32 *map = NULL;

	if (!np || !map_name || (!target && !id_out))
		return -EINVAL;

	map = of_get_property(np, map_name, &map_len);
	if (!map) {
		if (target)
			return -ENODEV;
		/* Otherwise, no map implies no translation */
		*id_out = id;
		return 0;
	}

	/* Each map entry is exactly 4 cells: id-base, phandle, out-base, length. */
	if (!map_len || map_len % (4 * sizeof(*map))) {
		pr_err("%pOF: Error: Bad %s length: %d\n", np,
			map_name, map_len);
		return -EINVAL;
	}

	/* The default is to select all bits. */
	map_mask = 0xffffffff;

	/*
	 * Can be overridden by "{iommu,msi}-map-mask" property.
	 * If of_property_read_u32() fails, the default is used.
	 */
	if (map_mask_name)
		of_property_read_u32(np, map_mask_name, &map_mask);

	masked_id = map_mask & id;
	for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
		struct device_node *phandle_node;
		u32 id_base = be32_to_cpup(map + 0);
		u32 phandle = be32_to_cpup(map + 1);
		u32 out_base = be32_to_cpup(map + 2);
		u32 id_len = be32_to_cpup(map + 3);

		/* An id-base outside the mask can never match a masked id. */
		if (id_base & ~map_mask) {
			pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores id-base (0x%x)\n",
				np, map_name, map_name,
				map_mask, id_base);
			return -EFAULT;
		}

		if (masked_id < id_base || masked_id >= id_base + id_len)
			continue;

		phandle_node = of_find_node_by_phandle(phandle);
		if (!phandle_node)
			return -ENODEV;

		if (target) {
			/* Keep the first match's ref; drop refs on later ones. */
			if (*target)
				of_node_put(phandle_node);
			else
				*target = phandle_node;

			/* Caller asked for a specific target; skip others. */
			if (*target != phandle_node)
				continue;
		}

		if (id_out)
			*id_out = masked_id - id_base + out_base;

		pr_debug("%pOF: %s, using mask %08x, id-base: %08x, out-base: %08x, length: %08x, id: %08x -> %08x\n",
			np, map_name, map_mask, id_base, out_base,
			id_len, id, masked_id - id_base + out_base);
		return 0;
	}

	pr_info("%pOF: no %s translation for id 0x%x on %pOF\n", np, map_name,
		id, target && *target ? *target : NULL);

	/* Bypasses translation */
	if (id_out)
		*id_out = id;
	return 0;
}
EXPORT_SYMBOL_GPL(of_map_id);