// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for creating, accessing and interpreting the device tree.
 *
 * Paul Mackerras	August 1996.
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
 *  {engebret|bergner}@us.ibm.com
 *
 * Adapted for sparc and sparc64 by David S. Miller davem@davemloft.net
 *
 * Reconsolidated from arch/x/kernel/prom.c by Stephen Rothwell and
 * Grant Likely.
 */

#define pr_fmt(fmt)	"OF: " fmt

#include <linux/cleanup.h>
#include <linux/console.h>
#include <linux/ctype.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/proc_fs.h>

#include "of_private.h"

LIST_HEAD(aliases_lookup);

struct device_node *of_root;
EXPORT_SYMBOL(of_root);
struct device_node *of_chosen;
EXPORT_SYMBOL(of_chosen);
struct device_node *of_aliases;
struct device_node *of_stdout;
static const char *of_stdout_options;

struct kset *of_kset;

/*
 * Used to protect the of_aliases, to hold off addition of nodes to sysfs.
 * This mutex must be held whenever modifications are being made to the
 * device tree. The of_{attach,detach}_node() and
 * of_{add,remove,update}_property() helpers make sure this happens.
 */
DEFINE_MUTEX(of_mutex);

/* use when traversing tree through the child, sibling,
 * or parent members of struct device_node.
 */
DEFINE_RAW_SPINLOCK(devtree_lock);

bool of_node_name_eq(const struct device_node *np, const char *name)
{
	const char *node_name;
	size_t len;

	if (!np)
		return false;

	node_name = kbasename(np->full_name);
	len = strchrnul(node_name, '@') - node_name;

	return (strlen(name) == len) && (strncmp(node_name, name, len) == 0);
}
EXPORT_SYMBOL(of_node_name_eq);

bool of_node_name_prefix(const struct device_node *np, const char *prefix)
{
	if (!np)
		return false;

	return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0;
}
EXPORT_SYMBOL(of_node_name_prefix);

static bool __of_node_is_type(const struct device_node *np, const char *type)
{
	const char *match = __of_get_property(np, "device_type", NULL);

	return np && match && type && !strcmp(match, type);
}

#define EXCLUDED_DEFAULT_CELLS_PLATFORMS ( \
	IS_ENABLED(CONFIG_SPARC) || \
	of_find_compatible_node(NULL, NULL, "coreboot") \
)

int of_bus_n_addr_cells(struct device_node *np)
{
	u32 cells;

	for (; np; np = np->parent) {
		if (!of_property_read_u32(np, "#address-cells", &cells))
			return cells;
		/*
		 * Default root value and walking parent nodes for "#address-cells"
		 * is deprecated. Any platforms which hit this warning should
		 * be added to the excluded list.
		 */
		WARN_ONCE(!EXCLUDED_DEFAULT_CELLS_PLATFORMS,
			  "Missing '#address-cells' in %pOF\n", np);
	}
	return OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
}

int of_n_addr_cells(struct device_node *np)
{
	if (np->parent)
		np = np->parent;

	return of_bus_n_addr_cells(np);
}
EXPORT_SYMBOL(of_n_addr_cells);

int of_bus_n_size_cells(struct device_node *np)
{
	u32 cells;

	for (; np; np = np->parent) {
		if (!of_property_read_u32(np, "#size-cells", &cells))
			return cells;
		/*
		 * Default root value and walking parent nodes for "#size-cells"
		 * is deprecated. Any platforms which hit this warning should
		 * be added to the excluded list.
		 */
		WARN_ONCE(!EXCLUDED_DEFAULT_CELLS_PLATFORMS,
			  "Missing '#size-cells' in %pOF\n", np);
	}
	return OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
}

int of_n_size_cells(struct device_node *np)
{
	if (np->parent)
		np = np->parent;

	return of_bus_n_size_cells(np);
}
EXPORT_SYMBOL(of_n_size_cells);

#ifdef CONFIG_NUMA
int __weak of_node_to_nid(struct device_node *np)
{
	return NUMA_NO_NODE;
}
#endif

#define OF_PHANDLE_CACHE_BITS	7
#define OF_PHANDLE_CACHE_SZ	BIT(OF_PHANDLE_CACHE_BITS)

static struct device_node *phandle_cache[OF_PHANDLE_CACHE_SZ];

static u32 of_phandle_cache_hash(phandle handle)
{
	return hash_32(handle, OF_PHANDLE_CACHE_BITS);
}

/*
 * Caller must hold devtree_lock.
 */
void __of_phandle_cache_inv_entry(phandle handle)
{
	u32 handle_hash;
	struct device_node *np;

	if (!handle)
		return;

	handle_hash = of_phandle_cache_hash(handle);

	np = phandle_cache[handle_hash];
	if (np && handle == np->phandle)
		phandle_cache[handle_hash] = NULL;
}

void __init of_core_init(void)
{
	struct device_node *np;

	of_platform_register_reconfig_notifier();

	/* Create the kset, and register existing nodes */
	mutex_lock(&of_mutex);
	of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
	if (!of_kset) {
		mutex_unlock(&of_mutex);
		pr_err("failed to register existing nodes\n");
		return;
	}
	for_each_of_allnodes(np) {
		__of_attach_node_sysfs(np);
		if (np->phandle && !phandle_cache[of_phandle_cache_hash(np->phandle)])
			phandle_cache[of_phandle_cache_hash(np->phandle)] = np;
	}
	mutex_unlock(&of_mutex);

	/* Symlink in /proc as required by userspace ABI */
	if (of_root)
		proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
}

static struct property *__of_find_property(const struct device_node *np,
					   const char *name, int *lenp)
{
	struct property *pp;

	if (!np)
		return NULL;

	for (pp = np->properties; pp; pp = pp->next) {
		if (of_prop_cmp(pp->name, name) == 0) {
			if (lenp)
				*lenp = pp->length;
			break;
		}
	}

	return pp;
}

struct property *of_find_property(const struct device_node *np,
				  const char *name,
				  int *lenp)
{
	struct property *pp;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	pp = __of_find_property(np, name, lenp);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	return pp;
}
EXPORT_SYMBOL(of_find_property);

struct device_node *__of_find_all_nodes(struct device_node *prev)
{
	struct device_node *np;
	if (!prev) {
		np = of_root;
	} else if (prev->child) {
		np = prev->child;
	} else {
		/* Walk back up looking for a sibling, or the end of the structure */
		np = prev;
		while (np->parent && !np->sibling)
			np = np->parent;
		np = np->sibling; /* Might be null at the end of the tree */
	}
	return np;
}

/**
 * of_find_all_nodes - Get next node in global list
 * @prev:	Previous node or NULL to start iteration
 *		of_node_put() will be called on it
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_all_nodes(struct device_node *prev)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	np = __of_find_all_nodes(prev);
	of_node_get(np);
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_all_nodes);

/*
 * Find a property with a given name for a given node
 * and return the value.
 */
const void *__of_get_property(const struct device_node *np,
			      const char *name, int *lenp)
{
	const struct property *pp = __of_find_property(np, name, lenp);

	return pp ? pp->value : NULL;
}

/*
 * Find a property with a given name for a given node
 * and return the value.
 */
const void *of_get_property(const struct device_node *np, const char *name,
			    int *lenp)
{
	const struct property *pp = of_find_property(np, name, lenp);

	return pp ? pp->value : NULL;
}
EXPORT_SYMBOL(of_get_property);

/**
 * __of_device_is_compatible() - Check if the node matches given constraints
 * @device: pointer to node
 * @compat: required compatible string, NULL or "" for any match
 * @type: required device_type value, NULL or "" for any match
 * @name: required node name, NULL or "" for any match
 *
 * Checks if the given @compat, @type and @name strings match the
 * properties of the given @device. A constraint can be skipped by
 * passing NULL or an empty string as the constraint.
 *
 * Returns 0 for no match, and a positive integer on match. The return
 * value is a relative score with larger values indicating better
 * matches. The score is weighted for the most specific compatible value
 * to get the highest score. Matching type is next, followed by matching
 * name. Practically speaking, this results in the following priority
 * order for matches:
 *
 * 1. specific compatible && type && name
 * 2. specific compatible && type
 * 3. specific compatible && name
 * 4. specific compatible
 * 5. general compatible && type && name
 * 6. general compatible && type
 * 7. general compatible && name
 * 8. general compatible
 * 9. type && name
 * 10. type
 * 11. name
 */
static int __of_device_is_compatible(const struct device_node *device,
				     const char *compat, const char *type, const char *name)
{
	const struct property *prop;
	const char *cp;
	int index = 0, score = 0;

	/* Compatible match has highest priority */
	if (compat && compat[0]) {
		prop = __of_find_property(device, "compatible", NULL);
		for (cp = of_prop_next_string(prop, NULL); cp;
		     cp = of_prop_next_string(prop, cp), index++) {
			if (of_compat_cmp(cp, compat, strlen(compat)) == 0) {
				score = INT_MAX/2 - (index << 2);
				break;
			}
		}
		if (!score)
			return 0;
	}

	/* Matching type is better than matching name */
	if (type && type[0]) {
		if (!__of_node_is_type(device, type))
			return 0;
		score += 2;
	}

	/* Matching name is a bit better than not */
	if (name && name[0]) {
		if (!of_node_name_eq(device, name))
			return 0;
		score++;
	}

	return score;
}

/** Checks if the given "compat" string matches one of the strings in
 * the device's "compatible" property
 */
int of_device_is_compatible(const struct device_node *device,
		const char *compat)
{
	unsigned long flags;
	int res;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	res = __of_device_is_compatible(device, compat, NULL, NULL);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return res;
}
EXPORT_SYMBOL(of_device_is_compatible);

/** Checks if the device is compatible with any of the entries in
 *  a NULL terminated array of strings. Returns the best match
 *  score or 0.
 */
int of_device_compatible_match(const struct device_node *device,
			       const char *const *compat)
{
	unsigned int tmp, score = 0;

	if (!compat)
		return 0;

	while (*compat) {
		tmp = of_device_is_compatible(device, *compat);
		if (tmp > score)
			score = tmp;
		compat++;
	}

	return score;
}
EXPORT_SYMBOL_GPL(of_device_compatible_match);
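
/*
 * Illustrative sketch of scoring a node against a list of compatibles.
 * The compatible strings and setup_chip() below are hypothetical, not
 * part of this file:
 *
 *	static const char *const chip_compats[] = {
 *		"vendor,chip-v2",
 *		"vendor,chip",
 *		NULL
 *	};
 *
 *	if (of_device_compatible_match(np, chip_compats))
 *		setup_chip(np);	// non-zero means at least one entry matched;
 *				// a larger score means a more specific match
 */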

/**
 * of_machine_compatible_match - Test root of device tree against a compatible array
 * @compats: NULL terminated array of compatible strings to look for in root node's compatible property.
 *
 * Returns true if the root node has any of the given compatible values in its
 * compatible property.
 */
bool of_machine_compatible_match(const char *const *compats)
{
	struct device_node *root;
	int rc = 0;

	root = of_find_node_by_path("/");
	if (root) {
		rc = of_device_compatible_match(root, compats);
		of_node_put(root);
	}

	return rc != 0;
}
EXPORT_SYMBOL(of_machine_compatible_match);

static bool __of_device_is_status(const struct device_node *device,
				  const char * const*strings)
{
	const char *status;
	int statlen;

	if (!device)
		return false;

	status = __of_get_property(device, "status", &statlen);
	if (status == NULL)
		return false;

	if (statlen > 0) {
		while (*strings) {
			unsigned int len = strlen(*strings);

			if ((*strings)[len - 1] == '-') {
				if (!strncmp(status, *strings, len))
					return true;
			} else {
				if (!strcmp(status, *strings))
					return true;
			}
			strings++;
		}
	}

	return false;
}

/**
 * __of_device_is_available - check if a device is available for use
 *
 * @device: Node to check for availability, with locks already held
 *
 * Return: True if the status property is absent or set to "okay" or "ok",
 * false otherwise
 */
static bool __of_device_is_available(const struct device_node *device)
{
	static const char * const ok[] = {"okay", "ok", NULL};

	if (!device)
		return false;

	return !__of_get_property(device, "status", NULL) ||
		__of_device_is_status(device, ok);
}

/**
 * __of_device_is_reserved - check if a device is reserved
 *
 * @device: Node to check for availability, with locks already held
 *
 * Return: True if the status property is set to "reserved", false otherwise
 */
static bool __of_device_is_reserved(const struct device_node *device)
{
	static const char * const reserved[] = {"reserved", NULL};

	return __of_device_is_status(device, reserved);
}

/**
 * of_device_is_available - check if a device is available for use
 *
 * @device: Node to check for availability
 *
 * Return: True if the status property is absent or set to "okay" or "ok",
 * false otherwise
 */
bool of_device_is_available(const struct device_node *device)
{
	unsigned long flags;
	bool res;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	res = __of_device_is_available(device);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return res;
}
EXPORT_SYMBOL(of_device_is_available);

/**
 * __of_device_is_fail - check if a device has status "fail" or "fail-..."
 *
 * @device: Node to check status for, with locks already held
 *
 * Return: True if the status property is set to "fail" or "fail-..." (for any
 * error code suffix), false otherwise
 */
static bool __of_device_is_fail(const struct device_node *device)
{
	static const char * const fail[] = {"fail", "fail-", NULL};

	return __of_device_is_status(device, fail);
}
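
/*
 * Sketch of how the status helpers above are typically consumed before a
 * driver touches a node (np is any device_node the caller already holds):
 *
 *	if (!of_device_is_available(np))
 *		return -ENODEV;	// status present and not "okay"/"ok"
 *
 * Nodes with status = "reserved" or "fail"/"fail-..." are detected by the
 * more specific helpers (__of_device_is_reserved(), __of_device_is_fail())
 * used elsewhere in this file.
 */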

/**
 * of_device_is_big_endian - check if a device has BE registers
 *
 * @device: Node to check for endianness
 *
 * Return: True if the device has a "big-endian" property, or if the kernel
 * was compiled for BE *and* the device has a "native-endian" property.
 * Returns false otherwise.
 *
 * Callers would nominally use ioread32be/iowrite32be if
 * of_device_is_big_endian() == true, or readl/writel otherwise.
 */
bool of_device_is_big_endian(const struct device_node *device)
{
	if (of_property_read_bool(device, "big-endian"))
		return true;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) &&
	    of_property_read_bool(device, "native-endian"))
		return true;
	return false;
}
EXPORT_SYMBOL(of_device_is_big_endian);

/**
 * of_get_parent - Get a node's parent if any
 * @node:	Node to get parent
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_get_parent(const struct device_node *node)
{
	struct device_node *np;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	np = of_node_get(node->parent);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_get_parent);

/**
 * of_get_next_parent - Iterate to a node's parent
 * @node:	Node to get parent of
 *
 * This is like of_get_parent() except that it drops the
 * refcount on the passed node, making it suitable for iterating
 * through a node's parents.
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_get_next_parent(struct device_node *node)
{
	struct device_node *parent;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	parent = of_node_get(node->parent);
	of_node_put(node);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return parent;
}
EXPORT_SYMBOL(of_get_next_parent);

static struct device_node *__of_get_next_child(const struct device_node *node,
						struct device_node *prev)
{
	struct device_node *next;

	if (!node)
		return NULL;

	next = prev ? prev->sibling : node->child;
	of_node_get(next);
	of_node_put(prev);
	return next;
}
#define __for_each_child_of_node(parent, child) \
	for (child = __of_get_next_child(parent, NULL); child != NULL; \
	     child = __of_get_next_child(parent, child))

/**
 * of_get_next_child - Iterate over a node's children
 * @node:	parent node
 * @prev:	previous child of the parent node, or NULL to get first
 *
 * Return: A node pointer with refcount incremented, use of_node_put() on
 * it when done. Returns NULL when prev is the last child. Decrements the
 * refcount of prev.
 */
struct device_node *of_get_next_child(const struct device_node *node,
	struct device_node *prev)
{
	struct device_node *next;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	next = __of_get_next_child(node, prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_child);

static struct device_node *of_get_next_status_child(const struct device_node *node,
						    struct device_node *prev,
						    bool (*checker)(const struct device_node *))
{
	struct device_node *next;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	next = prev ? prev->sibling : node->child;
	for (; next; next = next->sibling) {
		if (!checker(next))
			continue;
		if (of_node_get(next))
			break;
	}
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}

/**
 * of_get_next_available_child - Find the next available child node
 * @node:	parent node
 * @prev:	previous child of the parent node, or NULL to get first
 *
 * This function is like of_get_next_child(), except that it
 * automatically skips any disabled nodes (i.e. status = "disabled").
 */
struct device_node *of_get_next_available_child(const struct device_node *node,
	struct device_node *prev)
{
	return of_get_next_status_child(node, prev, __of_device_is_available);
}
EXPORT_SYMBOL(of_get_next_available_child);

/**
 * of_get_next_reserved_child - Find the next reserved child node
 * @node:	parent node
 * @prev:	previous child of the parent node, or NULL to get first
 *
 * This function is like of_get_next_child(), except that it
 * only returns child nodes whose status is "reserved".
 */
struct device_node *of_get_next_reserved_child(const struct device_node *node,
						struct device_node *prev)
{
	return of_get_next_status_child(node, prev, __of_device_is_reserved);
}
EXPORT_SYMBOL(of_get_next_reserved_child);

/**
 * of_get_next_cpu_node - Iterate on cpu nodes
 * @prev:	previous child of the /cpus node, or NULL to get first
 *
 * Unusable CPUs (those with the status property set to "fail" or "fail-...")
 * will be skipped.
 *
 * Return: A cpu node pointer with refcount incremented, use of_node_put()
 * on it when done. Returns NULL when prev is the last child. Decrements
 * the refcount of prev.
 */
struct device_node *of_get_next_cpu_node(struct device_node *prev)
{
	struct device_node *next = NULL;
	unsigned long flags;
	struct device_node *node;

	if (!prev)
		node = of_find_node_by_path("/cpus");

	raw_spin_lock_irqsave(&devtree_lock, flags);
	if (prev)
		next = prev->sibling;
	else if (node) {
		next = node->child;
		of_node_put(node);
	}
	for (; next; next = next->sibling) {
		if (__of_device_is_fail(next))
			continue;
		if (!(of_node_name_eq(next, "cpu") ||
		      __of_node_is_type(next, "cpu")))
			continue;
		if (of_node_get(next))
			break;
	}
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_cpu_node);

/**
 * of_get_compatible_child - Find compatible child node
 * @parent:	parent node
 * @compatible:	compatible string
 *
 * Lookup child node whose compatible property contains the given compatible
 * string.
 *
 * Return: a node pointer with refcount incremented, use of_node_put() on it
 * when done; or NULL if not found.
 */
struct device_node *of_get_compatible_child(const struct device_node *parent,
				const char *compatible)
{
	struct device_node *child;

	for_each_child_of_node(parent, child) {
		if (of_device_is_compatible(child, compatible))
			break;
	}

	return child;
}
EXPORT_SYMBOL(of_get_compatible_child);
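
/*
 * Minimal usage sketch for the child iterators above; "parent" and the
 * "vendor,codec" compatible are hypothetical placeholders:
 *
 *	struct device_node *child;
 *
 *	for_each_available_child_of_node(parent, child) {
 *		// visits only children that of_device_is_available()
 *		// accepts; the loop manages the refcounts itself
 *	}
 *
 *	child = of_get_compatible_child(parent, "vendor,codec");
 *	if (child) {
 *		// ... use child ...
 *		of_node_put(child);
 *	}
 */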

/**
 * of_get_child_by_name - Find the child node by name for a given parent
 * @node:	parent node
 * @name:	child name to look for.
 *
 * This function looks up a child node matching the given name.
 *
 * Return: A node pointer if found, with refcount incremented, use
 * of_node_put() on it when done.
 * Returns NULL if node is not found.
 */
struct device_node *of_get_child_by_name(const struct device_node *node,
				const char *name)
{
	struct device_node *child;

	for_each_child_of_node(node, child)
		if (of_node_name_eq(child, name))
			break;
	return child;
}
EXPORT_SYMBOL(of_get_child_by_name);

struct device_node *__of_find_node_by_path(const struct device_node *parent,
						const char *path)
{
	struct device_node *child;
	int len;

	len = strcspn(path, "/:");
	if (!len)
		return NULL;

	__for_each_child_of_node(parent, child) {
		const char *name = kbasename(child->full_name);
		if (strncmp(path, name, len) == 0 && (strlen(name) == len))
			return child;
	}
	return NULL;
}

struct device_node *__of_find_node_by_full_path(struct device_node *node,
						const char *path)
{
	const char *separator = strchr(path, ':');

	while (node && *path == '/') {
		struct device_node *tmp = node;

		path++; /* Increment past '/' delimiter */
		node = __of_find_node_by_path(node, path);
		of_node_put(tmp);
		path = strchrnul(path, '/');
		if (separator && separator < path)
			break;
	}
	return node;
}

/**
 * of_find_node_opts_by_path - Find a node matching a full OF path
 * @path: Either the full path to match, or if the path does not
 *        start with '/', the name of a property of the /aliases
 *        node (an alias). In the case of an alias, the node
 *        matching the alias' value will be returned.
 * @opts: Address of a pointer into which to store the start of
 *        an options string appended to the end of the path with
 *        a ':' separator.
 *
 * Valid paths:
 *  * /foo/bar	Full path
 *  * foo	Valid alias
 *  * foo/bar	Valid alias + relative path
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_opts_by_path(const char *path, const char **opts)
{
	struct device_node *np = NULL;
	const struct property *pp;
	unsigned long flags;
	const char *separator = strchr(path, ':');

	if (opts)
		*opts = separator ? separator + 1 : NULL;

	if (strcmp(path, "/") == 0)
		return of_node_get(of_root);

	/* The path could begin with an alias */
	if (*path != '/') {
		int len;
		const char *p = separator;

		if (!p)
			p = strchrnul(path, '/');
		len = p - path;

		/* of_aliases must not be NULL */
		if (!of_aliases)
			return NULL;

		for_each_property_of_node(of_aliases, pp) {
			if (strlen(pp->name) == len && !strncmp(pp->name, path, len)) {
				np = of_find_node_by_path(pp->value);
				break;
			}
		}
		if (!np)
			return NULL;
		path = p;
	}

	/* Step down the tree matching path components */
	raw_spin_lock_irqsave(&devtree_lock, flags);
	if (!np)
		np = of_node_get(of_root);
	np = __of_find_node_by_full_path(np, path);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_opts_by_path);
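
/*
 * Path/alias lookup sketch. "serial0:115200n8" is the conventional
 * stdout-path form (alias plus options); any alias present under
 * /aliases works the same way:
 *
 *	const char *opts;
 *	struct device_node *np;
 *
 *	np = of_find_node_opts_by_path("serial0:115200n8", &opts);
 *	if (np) {
 *		// np is the node the "serial0" alias points at,
 *		// opts now points at "115200n8"
 *		of_node_put(np);
 *	}
 */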

/**
 * of_find_node_by_name - Find a node by its "name" property
 * @from:	The node to start searching from or NULL; the node
 *		you pass will not be searched, only the next one
 *		will. Typically, you pass what the previous call
 *		returned. of_node_put() will be called on @from.
 * @name:	The name string to match against
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_by_name(struct device_node *from,
	const char *name)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (of_node_name_eq(np, name) && of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_name);

/**
 * of_find_node_by_type - Find a node by its "device_type" property
 * @from:	The node to start searching from, or NULL to start searching
 *		the entire device tree. The node you pass will not be
 *		searched, only the next one will; typically, you pass
 *		what the previous call returned. of_node_put() will be
 *		called on from for you.
 * @type:	The type string to match against
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_by_type(struct device_node *from,
	const char *type)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (__of_node_is_type(np, type) && of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_type);

/**
 * of_find_compatible_node - Find a node based on type and one of the
 *                           tokens in its "compatible" property
 * @from:	The node to start searching from or NULL, the node
 *		you pass will not be searched, only the next one
 *		will; typically, you pass what the previous call
 *		returned. of_node_put() will be called on it
 * @type:	The type string to match "device_type" or NULL to ignore
 * @compatible:	The string to match to one of the tokens in the device
 *		"compatible" list.
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_compatible_node(struct device_node *from,
	const char *type, const char *compatible)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (__of_device_is_compatible(np, compatible, type, NULL) &&
		    of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_compatible_node);

/**
 * of_find_node_with_property - Find a node which has a property with
 *                              the given name.
 * @from:	The node to start searching from or NULL, the node
 *		you pass will not be searched, only the next one
 *		will; typically, you pass what the previous call
 *		returned. of_node_put() will be called on it
 * @prop_name:	The name of the property to look for.
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_with_property(struct device_node *from,
	const char *prop_name)
{
	struct device_node *np;
	const struct property *pp;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np) {
		for (pp = np->properties; pp; pp = pp->next) {
			if (of_prop_cmp(pp->name, prop_name) == 0) {
				of_node_get(np);
				goto out;
			}
		}
	}
out:
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_with_property);

static
const struct of_device_id *__of_match_node(const struct of_device_id *matches,
					   const struct device_node *node)
{
	const struct of_device_id *best_match = NULL;
	int score, best_score = 0;

	if (!matches)
		return NULL;

	for (; matches->name[0] || matches->type[0] || matches->compatible[0]; matches++) {
		score = __of_device_is_compatible(node, matches->compatible,
						  matches->type, matches->name);
		if (score > best_score) {
			best_match = matches;
			best_score = score;
		}
	}

	return best_match;
}

/**
 * of_match_node - Tell if a device_node has a matching of_match structure
 * @matches:	array of of device match structures to search in
 * @node:	the of device structure to match against
 *
 * Low level utility function used by device matching.
 */
const struct of_device_id *of_match_node(const struct of_device_id *matches,
					 const struct device_node *node)
{
	const struct of_device_id *match;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	match = __of_match_node(matches, node);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return match;
}
EXPORT_SYMBOL(of_match_node);
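
/*
 * Match-table sketch. The table, the compatibles and the cfg pointers are
 * hypothetical; the pattern mirrors how bus code uses of_match_node():
 *
 *	static const struct of_device_id demo_of_match[] = {
 *		{ .compatible = "vendor,demo-v2", .data = &demo_v2_cfg },
 *		{ .compatible = "vendor,demo",    .data = &demo_v1_cfg },
 *		{ }	// sentinel: an all-empty entry terminates the table
 *	};
 *
 *	const struct of_device_id *match = of_match_node(demo_of_match, np);
 *	if (match)
 *		cfg = match->data;	// the most specific entry wins
 */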

/**
 * of_find_matching_node_and_match - Find a node based on an of_device_id
 *				     match table.
 * @from:	The node to start searching from or NULL, the node
 *		you pass will not be searched, only the next one
 *		will; typically, you pass what the previous call
 *		returned. of_node_put() will be called on it
 * @matches:	array of of device match structures to search in
 * @match:	Updated to point at the matches entry which matched
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_matching_node_and_match(struct device_node *from,
					const struct of_device_id *matches,
					const struct of_device_id **match)
{
	struct device_node *np;
	const struct of_device_id *m;
	unsigned long flags;

	if (match)
		*match = NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np) {
		m = __of_match_node(matches, np);
		if (m && of_node_get(np)) {
			if (match)
				*match = m;
			break;
		}
	}
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_matching_node_and_match);

/**
 * of_alias_from_compatible - Lookup appropriate alias for a device node
 *			      depending on compatible
 * @node:	pointer to a device tree node
 * @alias:	Pointer to buffer that alias value will be copied into
 * @len:	Length of alias value
 *
 * Based on the value of the compatible property, this routine will attempt
 * to choose an appropriate alias value for a particular device tree node.
 * It does this by stripping the manufacturer prefix (as delimited by a ',')
 * from the first entry in the compatible list property.
 *
 * Note: The matching on just the "product" side of the compatible is a relic
 * from I2C and SPI. Please do not add any new user.
 *
 * Return: This routine returns 0 on success, <0 on failure.
 */
int of_alias_from_compatible(const struct device_node *node, char *alias, int len)
{
	const char *compatible, *p;
	int cplen;

	compatible = of_get_property(node, "compatible", &cplen);
	if (!compatible || strlen(compatible) > cplen)
		return -ENODEV;
	p = strchr(compatible, ',');
	strscpy(alias, p ? p + 1 : compatible, len);
	return 0;
}
EXPORT_SYMBOL_GPL(of_alias_from_compatible);

/**
 * of_find_node_by_phandle - Find a node given a phandle
 * @handle:	phandle of the node to find
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_by_phandle(phandle handle)
{
	struct device_node *np = NULL;
	unsigned long flags;
	u32 handle_hash;

	if (!handle)
		return NULL;

	handle_hash = of_phandle_cache_hash(handle);

	raw_spin_lock_irqsave(&devtree_lock, flags);

	if (phandle_cache[handle_hash] &&
	    handle == phandle_cache[handle_hash]->phandle)
		np = phandle_cache[handle_hash];

	if (!np) {
		for_each_of_allnodes(np)
			if (np->phandle == handle &&
			    !of_node_check_flag(np, OF_DETACHED)) {
				phandle_cache[handle_hash] = np;
				break;
			}
	}

	of_node_get(np);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);

void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
{
	int i;
	printk("%s %pOF", msg, args->np);
	for (i = 0; i < args->args_count; i++) {
		const char delim = i ? ',' : ':';

		pr_cont("%c%08x", delim, args->args[i]);
	}
	pr_cont("\n");
}

int of_phandle_iterator_init(struct of_phandle_iterator *it,
		const struct device_node *np,
		const char *list_name,
		const char *cells_name,
		int cell_count)
{
	const __be32 *list;
	int size;

	memset(it, 0, sizeof(*it));

	/*
	 * one of cell_count or cells_name must be provided to determine the
	 * argument length.
	 */
	if (cell_count < 0 && !cells_name)
		return -EINVAL;

	list = of_get_property(np, list_name, &size);
	if (!list)
		return -ENOENT;

	it->cells_name = cells_name;
	it->cell_count = cell_count;
	it->parent = np;
	it->list_end = list + size / sizeof(*list);
	it->phandle_end = list;
	it->cur = list;

	return 0;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_init);

int of_phandle_iterator_next(struct of_phandle_iterator *it)
{
	uint32_t count = 0;

	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	if (!it->cur || it->phandle_end >= it->list_end)
		return -ENOENT;

	it->cur = it->phandle_end;

	/* If phandle is 0, then it is an empty entry with no arguments. */
	it->phandle = be32_to_cpup(it->cur++);

	if (it->phandle) {

		/*
		 * Find the provider node and parse the #*-cells property to
		 * determine the argument length.
		 */
		it->node = of_find_node_by_phandle(it->phandle);

		if (it->cells_name) {
			if (!it->node) {
				pr_err("%pOF: could not find phandle %d\n",
				       it->parent, it->phandle);
				goto err;
			}

			if (of_property_read_u32(it->node, it->cells_name,
						 &count)) {
				/*
				 * If both cell_count and cells_name are given,
				 * fall back to cell_count in absence
				 * of the cells_name property
				 */
				if (it->cell_count >= 0) {
					count = it->cell_count;
				} else {
					pr_err("%pOF: could not get %s for %pOF\n",
					       it->parent,
					       it->cells_name,
					       it->node);
					goto err;
				}
			}
		} else {
			count = it->cell_count;
		}

		/*
		 * Make sure that the arguments actually fit in the remaining
		 * property data length
		 */
		if (it->cur + count > it->list_end) {
			if (it->cells_name)
				pr_err("%pOF: %s = %d found %td\n",
				       it->parent, it->cells_name,
				       count, it->list_end - it->cur);
			else
				pr_err("%pOF: phandle %s needs %d, found %td\n",
				       it->parent, of_node_full_name(it->node),
				       count, it->list_end - it->cur);
			goto err;
		}
	}

	it->phandle_end = it->cur + count;
	it->cur_count = count;

	return 0;

err:
	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_next);

int of_phandle_iterator_args(struct of_phandle_iterator *it,
			     uint32_t *args,
			     int size)
{
	int i, count;

	count = it->cur_count;

	if (WARN_ON(size < count))
		count = size;

	for (i = 0; i < count; i++)
		args[i] = be32_to_cpup(it->cur++);

	return count;
}
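
/*
 * Iterator usage sketch. "clocks"/"#clock-cells" are standard binding
 * names used purely as an example; np is a consumer node held by the
 * caller:
 *
 *	struct of_phandle_iterator it;
 *	uint32_t args[MAX_PHANDLE_ARGS];
 *	int err;
 *
 *	of_for_each_phandle(&it, err, np, "clocks", "#clock-cells", 0) {
 *		// it.node is the provider (reference held by the iterator)
 *		// and it.cur_count cells follow the phandle:
 *		of_phandle_iterator_args(&it, args, MAX_PHANDLE_ARGS);
 *	}
 *	// err is -ENOENT once the walk has run out of entries,
 *	// or -EINVAL on a parse error
 */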

int __of_parse_phandle_with_args(const struct device_node *np,
				 const char *list_name,
				 const char *cells_name,
				 int cell_count, int index,
				 struct of_phandle_args *out_args)
{
	struct of_phandle_iterator it;
	int rc, cur_index = 0;

	if (index < 0)
		return -EINVAL;

	/* Loop over the phandles until the requested entry is found */
	of_for_each_phandle(&it, rc, np, list_name, cells_name, cell_count) {
		/*
		 * All of the error cases bail out of the loop, so at
		 * this point, the parsing is successful. If the requested
		 * index matches, then fill the out_args structure and return,
		 * or return -ENOENT for an empty entry.
		 */
		rc = -ENOENT;
		if (cur_index == index) {
			if (!it.phandle)
				goto err;

			if (out_args) {
				int c;

				c = of_phandle_iterator_args(&it,
							     out_args->args,
							     MAX_PHANDLE_ARGS);
				out_args->np = it.node;
				out_args->args_count = c;
			} else {
				of_node_put(it.node);
			}

			/* Found it! return success */
			return 0;
		}

		cur_index++;
	}

	/*
	 * Unlock node before returning result; will be one of:
	 * -ENOENT : index is for empty phandle
	 * -EINVAL : parsing error on data
	 */

err:
	of_node_put(it.node);
	return rc;
}
EXPORT_SYMBOL(__of_parse_phandle_with_args);
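
/*
 * Consumer-side sketch built on top of __of_parse_phandle_with_args()
 * via the of_parse_phandle_with_args() wrapper from <linux/of.h>.
 * "gpios"/"#gpio-cells" are just the familiar example bindings:
 *
 *	struct of_phandle_args args;
 *
 *	if (!of_parse_phandle_with_args(np, "gpios", "#gpio-cells", 0, &args)) {
 *		// args.np is the GPIO controller node (reference held),
 *		// args.args[0..args_count-1] are the specifier cells
 *		of_node_put(args.np);
 *	}
 */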

/**
 * of_parse_phandle_with_args_map() - Find a node pointed by phandle in a list and remap it
 * @np:		pointer to a device tree node containing a list
 * @list_name:	property name that contains a list
 * @stem_name:	stem of property names that specify phandles' arguments count
 * @index:	index of a phandle to parse out
 * @out_args:	optional pointer to output arguments structure (will be filled)
 *
 * This function is useful to parse lists of phandles and their arguments.
 * Returns 0 on success and fills out_args, on error returns appropriate errno
 * value. The difference between this function and of_parse_phandle_with_args()
 * is that this API remaps a phandle if the node the phandle points to has
 * a <@stem_name>-map property.
 *
 * Caller is responsible to call of_node_put() on the returned out_args->np
 * pointer.
 *
 * Example::
 *
 *  phandle1: node1 {
 *	#list-cells = <2>;
 *  };
 *
 *  phandle2: node2 {
 *	#list-cells = <1>;
 *  };
 *
 *  phandle3: node3 {
 *	#list-cells = <1>;
 *	list-map = <0 &phandle2 3>,
 *		   <1 &phandle2 2>,
 *		   <2 &phandle1 5 1>;
 *	list-map-mask = <0x3>;
 *  };
 *
 *  node4 {
 *	list = <&phandle1 1 2 &phandle3 0>;
 *  };
 *
 * To get a device_node of the ``node2`` node you may call this:
 * of_parse_phandle_with_args_map(node4, "list", "list", 1, &args);
 */
int of_parse_phandle_with_args_map(const struct device_node *np,
				   const char *list_name,
				   const char *stem_name,
				   int index, struct of_phandle_args *out_args)
{
	char *cells_name __free(kfree) = kasprintf(GFP_KERNEL, "#%s-cells", stem_name);
	char *map_name __free(kfree) = kasprintf(GFP_KERNEL, "%s-map", stem_name);
	char *mask_name __free(kfree) = kasprintf(GFP_KERNEL, "%s-map-mask", stem_name);
	char *pass_name __free(kfree) = kasprintf(GFP_KERNEL, "%s-map-pass-thru", stem_name);
	struct device_node *cur, *new = NULL;
	const __be32 *map, *mask, *pass;
	static const __be32 dummy_mask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) };
	static const __be32 dummy_pass[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(0) };
	__be32 initial_match_array[MAX_PHANDLE_ARGS];
	const __be32 *match_array = initial_match_array;
	int i, ret, map_len, match;
	u32 list_size, new_size;

	if (index < 0)
		return -EINVAL;

	if (!cells_name || !map_name || !mask_name || !pass_name)
		return -ENOMEM;

	ret = __of_parse_phandle_with_args(np, list_name, cells_name, -1, index,
					   out_args);
	if (ret)
		return ret;

	/* Get the #<list>-cells property */
	cur = out_args->np;
	ret = of_property_read_u32(cur, cells_name, &list_size);
	if (ret < 0)
		goto put;

	/* Precalculate the match array - this simplifies match loop */
	for (i = 0; i < list_size; i++)
		initial_match_array[i] = cpu_to_be32(out_args->args[i]);

	ret = -EINVAL;
	while (cur) {
		/* Get the <list>-map property */
		map = of_get_property(cur, map_name, &map_len);
		if (!map) {
			return 0;
		}
		map_len /= sizeof(u32);

		/* Get the <list>-map-mask property (optional) */
		mask = of_get_property(cur, mask_name, NULL);
		if (!mask)
			mask = dummy_mask;
		/* Iterate through <list>-map property */
		match = 0;
		while (map_len > (list_size + 1) && !match) {
			/* Compare specifiers */
			match = 1;
			for (i = 0; i < list_size; i++, map_len--)
				match &= !((match_array[i] ^ *map++) & mask[i]);

			of_node_put(new);
			new = of_find_node_by_phandle(be32_to_cpup(map));
			map++;
			map_len--;

			/* Check if not found */
			if (!new) {
				ret = -EINVAL;
				goto put;
			}

			if (!of_device_is_available(new))
				match = 0;

			ret = of_property_read_u32(new, cells_name, &new_size);
			if (ret)
				goto put;

			/* Check for malformed properties */
			if (WARN_ON(new_size > MAX_PHANDLE_ARGS) ||
			    map_len < new_size) {
				ret = -EINVAL;
				goto put;
			}

			/* Move forward by new node's #<list>-cells amount */
			map += new_size;
			map_len -= new_size;
		}
		if (!match) {
			ret = -ENOENT;
			goto put;
		}

		/* Get the <list>-map-pass-thru property (optional) */
		pass = of_get_property(cur, pass_name, NULL);
		if (!pass)
			pass = dummy_pass;

		/*
		 * Successfully parsed a <list>-map translation; copy new
		 * specifier into the out_args structure, keeping the
		 * bits specified in <list>-map-pass-thru.
		 */
		match_array = map - new_size;
		for (i = 0; i < new_size; i++) {
			__be32 val = *(map - new_size + i);

			if (i < list_size) {
				val &= ~pass[i];
				val |= cpu_to_be32(out_args->args[i]) & pass[i];
			}

			out_args->args[i] = be32_to_cpu(val);
		}
		out_args->args_count = list_size = new_size;
		/* Iterate again with new provider */
		out_args->np = new;
		of_node_put(cur);
		cur = new;
		new = NULL;
	}
put:
	of_node_put(cur);
	of_node_put(new);
	return ret;
}
EXPORT_SYMBOL(of_parse_phandle_with_args_map);

/**
 * of_count_phandle_with_args() - Find the number of phandle references in a property
 * @np:		pointer to a device tree node containing a list
 * @list_name:	property name that contains a list
 * @cells_name:	property name that specifies phandles' arguments count
 *
 * Return: The number of phandle + argument tuples within a property. It
 * is a typical pattern to encode a list of phandle and variable
 * arguments into a single property. The number of arguments is encoded
 * by a property in the phandle-target node. For example, a gpios
 * property would contain a list of GPIO specifiers consisting of a
 * phandle and 1 or more arguments. The number of arguments is
 * determined by the #gpio-cells property in the node pointed to by the
 * phandle.
 */
int of_count_phandle_with_args(const struct device_node *np, const char *list_name,
				const char *cells_name)
{
	struct of_phandle_iterator it;
	int rc, cur_index = 0;

	/*
	 * If cells_name is NULL we assume a cell count of 0. This makes
	 * counting the phandles trivial as each 32bit word in the list is a
	 * phandle and no arguments need to be considered. So we don't iterate
	 * through the list but just use the length to determine the phandle
	 * count.
	 */
	if (!cells_name) {
		const __be32 *list;
		int size;

		list = of_get_property(np, list_name, &size);
		if (!list)
			return -ENOENT;

		return size / sizeof(*list);
	}

	rc = of_phandle_iterator_init(&it, np, list_name, cells_name, -1);
	if (rc)
		return rc;

	while ((rc = of_phandle_iterator_next(&it)) == 0)
		cur_index += 1;

	if (rc != -ENOENT)
		return rc;

	return cur_index;
}
EXPORT_SYMBOL(of_count_phandle_with_args);
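
/*
 * Counting sketch, again using the "clocks" binding names purely as an
 * example:
 *
 *	int n = of_count_phandle_with_args(np, "clocks", "#clock-cells");
 *
 *	// n >= 0 is the number of clock specifiers in the property,
 *	// -ENOENT means the property is absent, other negative values
 *	// indicate a malformed list
 */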

static struct property *__of_remove_property_from_list(struct property **list, struct property *prop)
{
	struct property **next;

	for (next = list; *next; next = &(*next)->next) {
		if (*next == prop) {
			*next = prop->next;
			prop->next = NULL;
			return prop;
		}
	}
	return NULL;
}

/**
 * __of_add_property - Add a property to a node without lock operations
 * @np:		Caller's Device Node
 * @prop:	Property to add
 */
int __of_add_property(struct device_node *np, struct property *prop)
{
	int rc = 0;
	unsigned long flags;
	struct property **next;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	__of_remove_property_from_list(&np->deadprops, prop);

	prop->next = NULL;
	next = &np->properties;
	while (*next) {
		if (strcmp(prop->name, (*next)->name) == 0) {
			/* duplicate ! don't insert it */
			rc = -EEXIST;
			goto out_unlock;
		}
		next = &(*next)->next;
	}
	*next = prop;

out_unlock:
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	if (rc)
		return rc;

	__of_add_property_sysfs(np, prop);
	return 0;
}

/**
 * of_add_property - Add a property to a node
 * @np:		Caller's Device Node
 * @prop:	Property to add
 */
int of_add_property(struct device_node *np, struct property *prop)
{
	int rc;

	mutex_lock(&of_mutex);
	rc = __of_add_property(np, prop);
	mutex_unlock(&of_mutex);

	if (!rc)
		of_property_notify(OF_RECONFIG_ADD_PROPERTY, np, prop, NULL);

	return rc;
}
EXPORT_SYMBOL_GPL(of_add_property);

int __of_remove_property(struct device_node *np, struct property *prop)
{
	unsigned long flags;
	int rc = -ENODEV;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	if (__of_remove_property_from_list(&np->properties, prop)) {
		/* Found the property, add it to deadprops list */
		prop->next = np->deadprops;
		np->deadprops = prop;
		rc = 0;
	}

	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	if (rc)
		return rc;

	__of_remove_property_sysfs(np, prop);
	return 0;
}

/**
 * of_remove_property - Remove a property from a node.
 * @np:		Caller's Device Node
 * @prop:	Property to remove
 *
 * Note that we don't actually remove it, since we have given out
 * who-knows-how-many pointers to the data using get-property.
 * Instead we just move the property to the "dead properties"
 * list, so it won't be found any more.
 */
int of_remove_property(struct device_node *np, struct property *prop)
{
	int rc;

	if (!prop)
		return -ENODEV;

	mutex_lock(&of_mutex);
	rc = __of_remove_property(np, prop);
	mutex_unlock(&of_mutex);

	if (!rc)
		of_property_notify(OF_RECONFIG_REMOVE_PROPERTY, np, prop, NULL);

	return rc;
}
EXPORT_SYMBOL_GPL(of_remove_property);

int __of_update_property(struct device_node *np, struct property *newprop,
		struct property **oldpropp)
{
	struct property **next, *oldprop;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	__of_remove_property_from_list(&np->deadprops, newprop);

	for (next = &np->properties; *next; next = &(*next)->next) {
		if (of_prop_cmp((*next)->name, newprop->name) == 0)
			break;
	}
	*oldpropp = oldprop = *next;

	if (oldprop) {
		/* replace the node */
		newprop->next = oldprop->next;
		*next = newprop;
		oldprop->next = np->deadprops;
		np->deadprops = oldprop;
	} else {
		/* new node */
		newprop->next = NULL;
		*next = newprop;
	}

	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	__of_update_property_sysfs(np, newprop, oldprop);

	return 0;
}
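
/*
 * Property update sketch. The property name and value are hypothetical;
 * note that the struct property (and its value) must stay allocated for
 * the lifetime of the node, which is why static objects are used here:
 *
 *	static __be32 example_val = cpu_to_be32(1);
 *	static struct property example_prop = {
 *		.name	= "vendor,example-cells",
 *		.value	= &example_val,
 *		.length	= sizeof(example_val),
 *	};
 *
 *	of_update_property(np, &example_prop);	// adds it if not yet present
 */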

/*
 * of_update_property - Update a property in a node, if the property does
 * not exist, add it.
 *
 * Note that we don't actually remove it, since we have given out
 * who-knows-how-many pointers to the data using get-property.
 * Instead we just move the property to the "dead properties" list,
 * and add the new property to the property list
 */
int of_update_property(struct device_node *np, struct property *newprop)
{
	struct property *oldprop;
	int rc;

	if (!newprop->name)
		return -EINVAL;

	mutex_lock(&of_mutex);
	rc = __of_update_property(np, newprop, &oldprop);
	mutex_unlock(&of_mutex);

	if (!rc)
		of_property_notify(OF_RECONFIG_UPDATE_PROPERTY, np, newprop, oldprop);

	return rc;
}

static void of_alias_add(struct alias_prop *ap, struct device_node *np,
			 int id, const char *stem, int stem_len)
{
	ap->np = np;
	ap->id = id;
	strscpy(ap->stem, stem, stem_len + 1);
	list_add_tail(&ap->link, &aliases_lookup);
	pr_debug("adding DT alias:%s: stem=%s id=%i node=%pOF\n",
		 ap->alias, ap->stem, ap->id, np);
}

/**
 * of_alias_scan - Scan all properties of the 'aliases' node
 * @dt_alloc:	An allocator that provides a virtual address to memory
 *		for storing the resulting tree
 *
 * The function scans all the properties of the 'aliases' node and populates
 * the global lookup table with the properties.
 */
void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
{
	const struct property *pp;

	of_aliases = of_find_node_by_path("/aliases");
	of_chosen = of_find_node_by_path("/chosen");
	if (of_chosen == NULL)
		of_chosen = of_find_node_by_path("/chosen@0");

	if (of_chosen) {
		/* linux,stdout-path and /aliases/stdout are for legacy compatibility */
		const char *name = NULL;

		if (of_property_read_string(of_chosen, "stdout-path", &name))
			of_property_read_string(of_chosen, "linux,stdout-path",
						&name);
		if (IS_ENABLED(CONFIG_PPC) && !name)
			of_property_read_string(of_aliases, "stdout", &name);
		if (name)
			of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
		if (of_stdout)
			of_stdout->fwnode.flags |= FWNODE_FLAG_BEST_EFFORT;
	}

	if (!of_aliases)
		return;

	for_each_property_of_node(of_aliases, pp) {
		const char *start = pp->name;
		const char *end = start + strlen(start);
		struct device_node *np;
		struct alias_prop *ap;
		int id, len;

		/* Skip properties we do not want to process */
		if (!strcmp(pp->name, "name") ||
		    !strcmp(pp->name, "phandle") ||
		    !strcmp(pp->name, "linux,phandle"))
			continue;

		np = of_find_node_by_path(pp->value);
		if (!np)
			continue;

		/* walk the alias backwards to extract the id and work out
		 * the 'stem' string */
		while (isdigit(*(end-1)) && end > start)
			end--;
		len = end - start;

		if (kstrtoint(end, 10, &id) < 0)
			continue;

		/* Allocate an alias_prop with enough space for the stem */
		ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap));
		if (!ap)
			continue;
		memset(ap, 0, sizeof(*ap) + len + 1);
		ap->alias = start;
		of_alias_add(ap, np, id, start, len);
	}
}

/**
 * of_alias_get_id - Get alias id for the given device_node
 * @np:		Pointer to the given device_node
 * @stem:	Alias stem of the given device_node
 *
 * The function traverses the lookup table to get the alias id for the given
 * device_node and alias stem.
 *
 * Return: The alias id if found.
 */
int of_alias_get_id(const struct device_node *np, const char *stem)
{
	struct alias_prop *app;
	int id = -ENODEV;

	mutex_lock(&of_mutex);
	list_for_each_entry(app, &aliases_lookup, link) {
		if (strcmp(app->stem, stem) != 0)
			continue;

		if (np == app->np) {
			id = app->id;
			break;
		}
	}
	mutex_unlock(&of_mutex);

	return id;
}
EXPORT_SYMBOL_GPL(of_alias_get_id);

/**
 * of_alias_get_highest_id - Get highest alias id for the given stem
 * @stem:	Alias stem to be examined
 *
 * The function traverses the lookup table to get the highest alias id for the
 * given alias stem. It returns the alias id if found.
 */
int of_alias_get_highest_id(const char *stem)
{
	struct alias_prop *app;
	int id = -ENODEV;

	mutex_lock(&of_mutex);
	list_for_each_entry(app, &aliases_lookup, link) {
		if (strcmp(app->stem, stem) != 0)
			continue;

		if (app->id > id)
			id = app->id;
	}
	mutex_unlock(&of_mutex);

	return id;
}
EXPORT_SYMBOL_GPL(of_alias_get_highest_id);

/**
 * of_console_check() - Test and setup console for DT setup
 * @dn: Pointer to device node
 * @name: Name to use for preferred console without index. ex. "ttyS"
 * @index: Index to use for preferred console.
 *
 * Check if the given device node matches the stdout-path property in the
 * /chosen node. If it does then register it as the preferred console.
 *
 * Return: TRUE if console successfully setup. Otherwise return FALSE.
 */
bool of_console_check(const struct device_node *dn, char *name, int index)
{
	if (!dn || dn != of_stdout || console_set_on_cmdline)
		return false;

	/*
	 * XXX: cast `options' to char pointer to suppress compilation
	 * warnings: printk, UART and console drivers expect char pointer.
	 */
	return !add_preferred_console(name, index, (char *)of_stdout_options);
}
EXPORT_SYMBOL_GPL(of_console_check);

/**
 * of_find_next_cache_node - Find a node's subsidiary cache
 * @np:	node of type "cpu" or "cache"
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done. Caller should hold a reference
 * to np.
 */
struct device_node *of_find_next_cache_node(const struct device_node *np)
{
	struct device_node *child, *cache_node;

	cache_node = of_parse_phandle(np, "l2-cache", 0);
	if (!cache_node)
		cache_node = of_parse_phandle(np, "next-level-cache", 0);

	if (cache_node)
		return cache_node;

	/* OF on pmac has nodes instead of properties named "l2-cache"
	 * beneath CPU nodes.
	 */
	if (IS_ENABLED(CONFIG_PPC_PMAC) && of_node_is_type(np, "cpu"))
		for_each_child_of_node(np, child)
			if (of_node_is_type(child, "cache"))
				return child;

	return NULL;
}
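
/*
 * Alias lookup sketch. Assuming the DT carries, for example,
 * "serial0 = &uart_a" under /aliases, a UART driver bound to uart_a's
 * node can recover its preferred index with:
 *
 *	int id = of_alias_get_id(np, "serial");
 *
 *	// id == 0 here; a negative value means no matching alias, and
 *	// of_alias_get_highest_id("serial") reports the largest index in use
 */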

/**
 * of_find_last_cache_level - Find the level at which the last cache is
 * 		present for the given logical cpu
 *
 * @cpu: cpu number(logical index) for which the last cache level is needed
 *
 * Return: The level at which the last cache is present. It is exactly the
 * same as the total number of cache levels for the given logical cpu.
 */
int of_find_last_cache_level(unsigned int cpu)
{
	u32 cache_level = 0;
	struct device_node *prev = NULL, *np = of_cpu_device_node_get(cpu);

	while (np) {
		of_node_put(prev);
		prev = np;
		np = of_find_next_cache_node(np);
	}

	of_property_read_u32(prev, "cache-level", &cache_level);
	of_node_put(prev);

	return cache_level;
}

/**
 * of_map_id - Translate an ID through a downstream mapping.
 * @np: root complex device node.
 * @id: device ID to map.
 * @map_name: property name of the map to use.
 * @map_mask_name: optional property name of the mask to use.
 * @target: optional pointer to a target device node.
 * @id_out: optional pointer to receive the translated ID.
 *
 * Given a device ID, look up the appropriate implementation-defined
 * platform ID and/or the target device which receives transactions on that
 * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
 * @id_out may be NULL if only the other is required. If @target points to
 * a non-NULL device node pointer, only entries targeting that node will be
 * matched; if it points to a NULL value, it will receive the device node of
 * the first matching target phandle, with a reference held.
 *
 * Return: 0 on success or a standard error code on failure.
 */
int of_map_id(const struct device_node *np, u32 id,
	       const char *map_name, const char *map_mask_name,
	       struct device_node **target, u32 *id_out)
{
	u32 map_mask, masked_id;
	int map_len;
	const __be32 *map = NULL;

	if (!np || !map_name || (!target && !id_out))
		return -EINVAL;

	map = of_get_property(np, map_name, &map_len);
	if (!map) {
		if (target)
			return -ENODEV;
		/* Otherwise, no map implies no translation */
		*id_out = id;
		return 0;
	}

	if (!map_len || map_len % (4 * sizeof(*map))) {
		pr_err("%pOF: Error: Bad %s length: %d\n", np,
			map_name, map_len);
		return -EINVAL;
	}

	/* The default is to select all bits. */
	map_mask = 0xffffffff;

	/*
	 * Can be overridden by "{iommu,msi}-map-mask" property.
	 * If of_property_read_u32() fails, the default is used.
	 */
	if (map_mask_name)
		of_property_read_u32(np, map_mask_name, &map_mask);

	masked_id = map_mask & id;
	for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
		struct device_node *phandle_node;
		u32 id_base = be32_to_cpup(map + 0);
		u32 phandle = be32_to_cpup(map + 1);
		u32 out_base = be32_to_cpup(map + 2);
		u32 id_len = be32_to_cpup(map + 3);

		if (id_base & ~map_mask) {
			pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores id-base (0x%x)\n",
				np, map_name, map_name,
				map_mask, id_base);
			return -EFAULT;
		}

		if (masked_id < id_base || masked_id >= id_base + id_len)
			continue;

		phandle_node = of_find_node_by_phandle(phandle);
		if (!phandle_node)
			return -ENODEV;

		if (target) {
			if (*target)
				of_node_put(phandle_node);
			else
				*target = phandle_node;

			if (*target != phandle_node)
				continue;
		}

		if (id_out)
			*id_out = masked_id - id_base + out_base;

		pr_debug("%pOF: %s, using mask %08x, id-base: %08x, out-base: %08x, length: %08x, id: %08x -> %08x\n",
			np, map_name, map_mask, id_base, out_base,
			id_len, id, masked_id - id_base + out_base);
		return 0;
	}

	pr_info("%pOF: no %s translation for id 0x%x on %pOF\n", np, map_name,
		id, target && *target ? *target : NULL);

	/* Bypasses translation */
	if (id_out)
		*id_out = id;
	return 0;
}
EXPORT_SYMBOL_GPL(of_map_id);
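
/*
 * of_map_id() usage sketch, modelled on the msi-map binding. bridge_np
 * and rid are hypothetical (a PCI host bridge node and a requester ID):
 *
 *	struct device_node *msi_np = NULL;
 *	u32 msi_id;
 *
 *	if (!of_map_id(bridge_np, rid, "msi-map", "msi-map-mask",
 *		       &msi_np, &msi_id)) {
 *		// msi_np (reference held) is the MSI controller that
 *		// serves rid, and msi_id is the translated device ID
 *		of_node_put(msi_np);
 *	}
 */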