// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for creating, accessing and interpreting the device tree.
 *
 * Paul Mackerras	August 1996.
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
 *   {engebret|bergner}@us.ibm.com
 *
 * Adapted for sparc and sparc64 by David S. Miller davem@davemloft.net
 *
 * Reconsolidated from arch/x/kernel/prom.c by Stephen Rothwell and
 * Grant Likely.
 */

#define pr_fmt(fmt)	"OF: " fmt

#include <linux/cleanup.h>
#include <linux/console.h>
#include <linux/ctype.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/proc_fs.h>

#include "of_private.h"

LIST_HEAD(aliases_lookup);

struct device_node *of_root;
EXPORT_SYMBOL(of_root);
struct device_node *of_chosen;
EXPORT_SYMBOL(of_chosen);
struct device_node *of_aliases;
struct device_node *of_stdout;
static const char *of_stdout_options;

struct kset *of_kset;

/*
 * Used to protect the of_aliases, to hold off addition of nodes to sysfs.
 * This mutex must be held whenever modifications are being made to the
 * device tree. The of_{attach,detach}_node() and
 * of_{add,remove,update}_property() helpers make sure this happens.
 */
DEFINE_MUTEX(of_mutex);

/* use when traversing tree through the child, sibling,
 * or parent members of struct device_node.
 */
DEFINE_RAW_SPINLOCK(devtree_lock);

bool of_node_name_eq(const struct device_node *np, const char *name)
{
	const char *node_name;
	size_t len;

	if (!np)
		return false;

	node_name = kbasename(np->full_name);
	len = strchrnul(node_name, '@') - node_name;

	return (strlen(name) == len) && (strncmp(node_name, name, len) == 0);
}
EXPORT_SYMBOL(of_node_name_eq);

bool of_node_name_prefix(const struct device_node *np, const char *prefix)
{
	if (!np)
		return false;

	return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0;
}
EXPORT_SYMBOL(of_node_name_prefix);
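
/*
 * Usage sketch (illustrative only; the node name below is hypothetical):
 * of_node_name_eq() compares against the node name with any unit address
 * stripped, while of_node_name_prefix() matches the start of the full node
 * name. For a node "serial@fe001000":
 *
 *	of_node_name_eq(np, "serial");			// true
 *	of_node_name_eq(np, "serial@fe001000");		// false, '@...' is stripped
 *	of_node_name_prefix(np, "ser");			// true
 */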

static bool __of_node_is_type(const struct device_node *np, const char *type)
{
	const char *match = __of_get_property(np, "device_type", NULL);

	return np && match && type && !strcmp(match, type);
}

#define EXCLUDED_DEFAULT_CELLS_PLATFORMS ( \
	IS_ENABLED(CONFIG_SPARC) \
)

int of_bus_n_addr_cells(struct device_node *np)
{
	u32 cells;

	for (; np; np = np->parent) {
		if (!of_property_read_u32(np, "#address-cells", &cells))
			return cells;
		/*
		 * Default root value and walking parent nodes for
		 * "#address-cells" are deprecated. Any platforms which hit
		 * this warning should be added to the excluded list.
		 */
		WARN_ONCE(!EXCLUDED_DEFAULT_CELLS_PLATFORMS,
			  "Missing '#address-cells' in %pOF\n", np);
	}
	return OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
}

int of_n_addr_cells(struct device_node *np)
{
	if (np->parent)
		np = np->parent;

	return of_bus_n_addr_cells(np);
}
EXPORT_SYMBOL(of_n_addr_cells);

int of_bus_n_size_cells(struct device_node *np)
{
	u32 cells;

	for (; np; np = np->parent) {
		if (!of_property_read_u32(np, "#size-cells", &cells))
			return cells;
		/*
		 * Default root value and walking parent nodes for
		 * "#size-cells" are deprecated. Any platforms which hit
		 * this warning should be added to the excluded list.
		 */
		WARN_ONCE(!EXCLUDED_DEFAULT_CELLS_PLATFORMS,
			  "Missing '#size-cells' in %pOF\n", np);
	}
	return OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
}

int of_n_size_cells(struct device_node *np)
{
	if (np->parent)
		np = np->parent;

	return of_bus_n_size_cells(np);
}
EXPORT_SYMBOL(of_n_size_cells);

#ifdef CONFIG_NUMA
int __weak of_node_to_nid(struct device_node *np)
{
	return NUMA_NO_NODE;
}
#endif

#define OF_PHANDLE_CACHE_BITS	7
#define OF_PHANDLE_CACHE_SZ	BIT(OF_PHANDLE_CACHE_BITS)

static struct device_node *phandle_cache[OF_PHANDLE_CACHE_SZ];

static u32 of_phandle_cache_hash(phandle handle)
{
	return hash_32(handle, OF_PHANDLE_CACHE_BITS);
}

/*
 * Caller must hold devtree_lock.
 */
void __of_phandle_cache_inv_entry(phandle handle)
{
	u32 handle_hash;
	struct device_node *np;

	if (!handle)
		return;

	handle_hash = of_phandle_cache_hash(handle);

	np = phandle_cache[handle_hash];
	if (np && handle == np->phandle)
		phandle_cache[handle_hash] = NULL;
}

void __init of_core_init(void)
{
	struct device_node *np;

	of_platform_register_reconfig_notifier();

	/* Create the kset, and register existing nodes */
	mutex_lock(&of_mutex);
	of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
	if (!of_kset) {
		mutex_unlock(&of_mutex);
		pr_err("failed to register existing nodes\n");
		return;
	}
	for_each_of_allnodes(np) {
		__of_attach_node_sysfs(np);
		if (np->phandle && !phandle_cache[of_phandle_cache_hash(np->phandle)])
			phandle_cache[of_phandle_cache_hash(np->phandle)] = np;
	}
	mutex_unlock(&of_mutex);

	/* Symlink in /proc as required by userspace ABI */
	if (of_root)
		proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
}

static struct property *__of_find_property(const struct device_node *np,
					   const char *name, int *lenp)
{
	struct property *pp;

	if (!np)
		return NULL;

	for (pp = np->properties; pp; pp = pp->next) {
		if (of_prop_cmp(pp->name, name) == 0) {
			if (lenp)
				*lenp = pp->length;
			break;
		}
	}

	return pp;
}

struct property *of_find_property(const struct device_node *np,
				  const char *name,
				  int *lenp)
{
	struct property *pp;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	pp = __of_find_property(np, name, lenp);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	return pp;
}
EXPORT_SYMBOL(of_find_property);
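
/*
 * Usage sketch (illustrative only; the property name is hypothetical):
 * of_find_property() is typically used to test for the presence of a
 * property and, optionally, to retrieve its length in bytes:
 *
 *	int len;
 *	struct property *pp = of_find_property(np, "example-data", &len);
 *
 *	if (pp)
 *		pr_debug("%s is %d bytes\n", pp->name, len);
 */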

struct device_node *__of_find_all_nodes(struct device_node *prev)
{
	struct device_node *np;
	if (!prev) {
		np = of_root;
	} else if (prev->child) {
		np = prev->child;
	} else {
		/* Walk back up looking for a sibling, or the end of the structure */
		np = prev;
		while (np->parent && !np->sibling)
			np = np->parent;
		np = np->sibling; /* Might be null at the end of the tree */
	}
	return np;
}

/**
 * of_find_all_nodes - Get next node in global list
 * @prev:	Previous node or NULL to start iteration
 *		of_node_put() will be called on it
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_all_nodes(struct device_node *prev)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	np = __of_find_all_nodes(prev);
	of_node_get(np);
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_all_nodes);

/*
 * Find a property with a given name for a given node
 * and return the value.
 */
const void *__of_get_property(const struct device_node *np,
			      const char *name, int *lenp)
{
	const struct property *pp = __of_find_property(np, name, lenp);

	return pp ? pp->value : NULL;
}

/*
 * Find a property with a given name for a given node
 * and return the value.
 */
const void *of_get_property(const struct device_node *np, const char *name,
			    int *lenp)
{
	const struct property *pp = of_find_property(np, name, lenp);

	return pp ? pp->value : NULL;
}
EXPORT_SYMBOL(of_get_property);

/**
 * __of_device_is_compatible() - Check if the node matches given constraints
 * @device: pointer to node
 * @compat: required compatible string, NULL or "" for any match
 * @type: required device_type value, NULL or "" for any match
 * @name: required node name, NULL or "" for any match
 *
 * Checks if the given @compat, @type and @name strings match the
 * properties of the given @device. A constraint can be skipped by
 * passing NULL or an empty string as the constraint.
 *
 * Returns 0 for no match, and a positive integer on match. The return
 * value is a relative score with larger values indicating better
 * matches. The score is weighted for the most specific compatible value
 * to get the highest score. Matching type is next, followed by matching
 * name. Practically speaking, this results in the following priority
 * order for matches:
 *
 * 1. specific compatible && type && name
 * 2. specific compatible && type
 * 3. specific compatible && name
 * 4. specific compatible
 * 5. general compatible && type && name
 * 6. general compatible && type
 * 7. general compatible && name
 * 8. general compatible
 * 9. type && name
 * 10. type
 * 11. name
 */
static int __of_device_is_compatible(const struct device_node *device,
				     const char *compat, const char *type, const char *name)
{
	const struct property *prop;
	const char *cp;
	int index = 0, score = 0;

	/* Compatible match has highest priority */
	if (compat && compat[0]) {
		prop = __of_find_property(device, "compatible", NULL);
		for (cp = of_prop_next_string(prop, NULL); cp;
		     cp = of_prop_next_string(prop, cp), index++) {
			if (of_compat_cmp(cp, compat, strlen(compat)) == 0) {
				score = INT_MAX/2 - (index << 2);
				break;
			}
		}
		if (!score)
			return 0;
	}

	/* Matching type is better than matching name */
	if (type && type[0]) {
		if (!__of_node_is_type(device, type))
			return 0;
		score += 2;
	}

	/* Matching name is a bit better than not */
	if (name && name[0]) {
		if (!of_node_name_eq(device, name))
			return 0;
		score++;
	}

	return score;
}

/** Checks if the given "compat" string matches one of the strings in
 * the device's "compatible" property
 */
int of_device_is_compatible(const struct device_node *device,
			    const char *compat)
{
	unsigned long flags;
	int res;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	res = __of_device_is_compatible(device, compat, NULL, NULL);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return res;
}
EXPORT_SYMBOL(of_device_is_compatible);

/** Checks if the device is compatible with any of the entries in
 *  a NULL terminated array of strings. Returns the best match
 *  score or 0.
 */
int of_device_compatible_match(const struct device_node *device,
			       const char *const *compat)
{
	unsigned int tmp, score = 0;

	if (!compat)
		return 0;

	while (*compat) {
		tmp = of_device_is_compatible(device, *compat);
		if (tmp > score)
			score = tmp;
		compat++;
	}

	return score;
}
EXPORT_SYMBOL_GPL(of_device_compatible_match);
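
/*
 * Usage sketch (illustrative only; the compatible strings are hypothetical):
 * a driver typically checks a node against one or more "vendor,device"
 * strings before touching it:
 *
 *	static const char * const acme_compats[] = {
 *		"acme,uart-v2", "acme,uart", NULL
 *	};
 *
 *	if (of_device_is_compatible(np, "acme,uart"))
 *		...	// the exact string is present in the node's list
 *
 *	best = of_device_compatible_match(np, acme_compats);
 *	// 0 means no match; a non-zero score favours entries that appear
 *	// earlier in the node's own "compatible" property
 */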

/**
 * of_machine_compatible_match - Test root of device tree against a compatible array
 * @compats: NULL terminated array of compatible strings to look for in root node's compatible property.
 *
 * Returns true if the root node has any of the given compatible values in its
 * compatible property.
 */
bool of_machine_compatible_match(const char *const *compats)
{
	struct device_node *root;
	int rc = 0;

	root = of_find_node_by_path("/");
	if (root) {
		rc = of_device_compatible_match(root, compats);
		of_node_put(root);
	}

	return rc != 0;
}
EXPORT_SYMBOL(of_machine_compatible_match);

static bool __of_device_is_status(const struct device_node *device,
				  const char * const *strings)
{
	const char *status;
	int statlen;

	if (!device)
		return false;

	status = __of_get_property(device, "status", &statlen);
	if (status == NULL)
		return false;

	if (statlen > 0) {
		while (*strings) {
			unsigned int len = strlen(*strings);

			if ((*strings)[len - 1] == '-') {
				if (!strncmp(status, *strings, len))
					return true;
			} else {
				if (!strcmp(status, *strings))
					return true;
			}
			strings++;
		}
	}

	return false;
}

/**
 * __of_device_is_available - check if a device is available for use
 *
 * @device: Node to check for availability, with locks already held
 *
 * Return: True if the status property is absent or set to "okay" or "ok",
 * false otherwise
 */
static bool __of_device_is_available(const struct device_node *device)
{
	static const char * const ok[] = {"okay", "ok", NULL};

	if (!device)
		return false;

	return !__of_get_property(device, "status", NULL) ||
		__of_device_is_status(device, ok);
}

/**
 * __of_device_is_reserved - check if a device is reserved
 *
 * @device: Node to check for availability, with locks already held
 *
 * Return: True if the status property is set to "reserved", false otherwise
 */
static bool __of_device_is_reserved(const struct device_node *device)
{
	static const char * const reserved[] = {"reserved", NULL};

	return __of_device_is_status(device, reserved);
}

/**
 * of_device_is_available - check if a device is available for use
 *
 * @device: Node to check for availability
 *
 * Return: True if the status property is absent or set to "okay" or "ok",
 * false otherwise
 */
bool of_device_is_available(const struct device_node *device)
{
	unsigned long flags;
	bool res;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	res = __of_device_is_available(device);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return res;
}
EXPORT_SYMBOL(of_device_is_available);

/**
 * __of_device_is_fail - check if a device has status "fail" or "fail-..."
 *
 * @device: Node to check status for, with locks already held
 *
 * Return: True if the status property is set to "fail" or "fail-..." (for any
 * error code suffix), false otherwise
 */
static bool __of_device_is_fail(const struct device_node *device)
{
	static const char * const fail[] = {"fail", "fail-", NULL};

	return __of_device_is_status(device, fail);
}
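
/*
 * For reference (illustrative only): the helpers above correspond to the
 * standard "status" property values, e.g. for a hypothetical DT fragment
 *
 *	uart0: serial@fe001000 {
 *		status = "disabled";
 *	};
 *
 * of_device_is_available() returns true only when "status" is absent or set
 * to "okay"/"ok"; "reserved" and "fail"/"fail-..." are reported by the
 * reserved/fail helpers instead.
 */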

/**
 * of_device_is_big_endian - check if a device has BE registers
 *
 * @device: Node to check for endianness
 *
 * Return: True if the device has a "big-endian" property, or if the kernel
 * was compiled for BE *and* the device has a "native-endian" property.
 * Returns false otherwise.
 *
 * Callers would nominally use ioread32be/iowrite32be if
 * of_device_is_big_endian() == true, or readl/writel otherwise.
 */
bool of_device_is_big_endian(const struct device_node *device)
{
	if (of_property_read_bool(device, "big-endian"))
		return true;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) &&
	    of_property_read_bool(device, "native-endian"))
		return true;
	return false;
}
EXPORT_SYMBOL(of_device_is_big_endian);

/**
 * of_get_parent - Get a node's parent if any
 * @node:	Node to get parent
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_get_parent(const struct device_node *node)
{
	struct device_node *np;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	np = of_node_get(node->parent);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_get_parent);

/**
 * of_get_next_parent - Iterate to a node's parent
 * @node:	Node to get parent of
 *
 * This is like of_get_parent() except that it drops the
 * refcount on the passed node, making it suitable for iterating
 * through a node's parents.
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_get_next_parent(struct device_node *node)
{
	struct device_node *parent;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	parent = of_node_get(node->parent);
	of_node_put(node);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return parent;
}
EXPORT_SYMBOL(of_get_next_parent);

static struct device_node *__of_get_next_child(const struct device_node *node,
					       struct device_node *prev)
{
	struct device_node *next;

	if (!node)
		return NULL;

	next = prev ? prev->sibling : node->child;
	of_node_get(next);
	of_node_put(prev);
	return next;
}
#define __for_each_child_of_node(parent, child) \
	for (child = __of_get_next_child(parent, NULL); child != NULL; \
	     child = __of_get_next_child(parent, child))

/**
 * of_get_next_child - Iterate over a node's children
 * @node:	parent node
 * @prev:	previous child of the parent node, or NULL to get first
 *
 * Return: A node pointer with refcount incremented, use of_node_put() on
 * it when done. Returns NULL when prev is the last child. Decrements the
 * refcount of prev.
 */
struct device_node *of_get_next_child(const struct device_node *node,
	struct device_node *prev)
{
	struct device_node *next;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	next = __of_get_next_child(node, prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_child);
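
/*
 * Usage sketch (illustrative only): children are normally iterated with the
 * for_each_child_of_node() helper from <linux/of.h>, which is built on
 * of_get_next_child(). Each iteration drops the reference on the previous
 * child and takes one on the next, so a child kept after breaking out of
 * the loop must eventually be released with of_node_put():
 *
 *	struct device_node *child;
 *
 *	for_each_child_of_node(parent, child) {
 *		if (of_node_name_eq(child, "port"))
 *			break;	// 'child' still holds a reference here
 *	}
 *	...
 *	of_node_put(child);
 */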

/**
 * of_get_next_child_with_prefix - Find the next child node with prefix
 * @node:	parent node
 * @prev:	previous child of the parent node, or NULL to get first
 * @prefix:	prefix that the node name should have
 *
 * This function is like of_get_next_child(), except that it automatically
 * skips any nodes whose name doesn't have the given prefix.
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_get_next_child_with_prefix(const struct device_node *node,
						  struct device_node *prev,
						  const char *prefix)
{
	struct device_node *next;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	next = prev ? prev->sibling : node->child;
	for (; next; next = next->sibling) {
		if (!of_node_name_prefix(next, prefix))
			continue;
		if (of_node_get(next))
			break;
	}
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_child_with_prefix);

static struct device_node *of_get_next_status_child(const struct device_node *node,
						    struct device_node *prev,
						    bool (*checker)(const struct device_node *))
{
	struct device_node *next;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	next = prev ? prev->sibling : node->child;
	for (; next; next = next->sibling) {
		if (!checker(next))
			continue;
		if (of_node_get(next))
			break;
	}
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}

/**
 * of_get_next_available_child - Find the next available child node
 * @node:	parent node
 * @prev:	previous child of the parent node, or NULL to get first
 *
 * This function is like of_get_next_child(), except that it
 * automatically skips any disabled nodes (i.e. status = "disabled").
 */
struct device_node *of_get_next_available_child(const struct device_node *node,
	struct device_node *prev)
{
	return of_get_next_status_child(node, prev, __of_device_is_available);
}
EXPORT_SYMBOL(of_get_next_available_child);

/**
 * of_get_next_reserved_child - Find the next reserved child node
 * @node:	parent node
 * @prev:	previous child of the parent node, or NULL to get first
 *
 * This function is like of_get_next_child(), except that it only returns
 * reserved nodes (i.e. status = "reserved") and skips all others.
 */
struct device_node *of_get_next_reserved_child(const struct device_node *node,
					       struct device_node *prev)
{
	return of_get_next_status_child(node, prev, __of_device_is_reserved);
}
EXPORT_SYMBOL(of_get_next_reserved_child);

/**
 * of_get_next_cpu_node - Iterate on cpu nodes
 * @prev:	previous child of the /cpus node, or NULL to get first
 *
 * Unusable CPUs (those with the status property set to "fail" or "fail-...")
 * will be skipped.
 *
 * Return: A cpu node pointer with refcount incremented, use of_node_put()
 * on it when done. Returns NULL when prev is the last child. Decrements
 * the refcount of prev.
 */
struct device_node *of_get_next_cpu_node(struct device_node *prev)
{
	struct device_node *next = NULL;
	unsigned long flags;
	struct device_node *node;

	if (!prev)
		node = of_find_node_by_path("/cpus");

	raw_spin_lock_irqsave(&devtree_lock, flags);
	if (prev)
		next = prev->sibling;
	else if (node) {
		next = node->child;
		of_node_put(node);
	}
	for (; next; next = next->sibling) {
		if (__of_device_is_fail(next))
			continue;
		if (!(of_node_name_eq(next, "cpu") ||
		      __of_node_is_type(next, "cpu")))
			continue;
		if (of_node_get(next))
			break;
	}
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_cpu_node);
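
/*
 * Usage sketch (illustrative only): of_get_next_cpu_node() is normally
 * driven through the for_each_of_cpu_node() iterator from <linux/of.h>,
 * e.g. to count usable CPU nodes:
 *
 *	struct device_node *cpu;
 *	unsigned int n = 0;
 *
 *	for_each_of_cpu_node(cpu)
 *		n++;
 */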

/**
 * of_get_compatible_child - Find compatible child node
 * @parent:	parent node
 * @compatible:	compatible string
 *
 * Lookup child node whose compatible property contains the given compatible
 * string.
 *
 * Return: a node pointer with refcount incremented, use of_node_put() on it
 * when done; or NULL if not found.
 */
struct device_node *of_get_compatible_child(const struct device_node *parent,
					    const char *compatible)
{
	struct device_node *child;

	for_each_child_of_node(parent, child) {
		if (of_device_is_compatible(child, compatible))
			break;
	}

	return child;
}
EXPORT_SYMBOL(of_get_compatible_child);

/**
 * of_get_child_by_name - Find the child node by name for a given parent
 * @node:	parent node
 * @name:	child name to look for.
 *
 * This function looks for a child node with the given matching name.
 *
 * Return: A node pointer if found, with refcount incremented, use
 * of_node_put() on it when done.
 * Returns NULL if node is not found.
 */
struct device_node *of_get_child_by_name(const struct device_node *node,
					 const char *name)
{
	struct device_node *child;

	for_each_child_of_node(node, child)
		if (of_node_name_eq(child, name))
			break;
	return child;
}
EXPORT_SYMBOL(of_get_child_by_name);

struct device_node *__of_find_node_by_path(const struct device_node *parent,
					   const char *path)
{
	struct device_node *child;
	int len;

	len = strcspn(path, "/:");
	if (!len)
		return NULL;

	__for_each_child_of_node(parent, child) {
		const char *name = kbasename(child->full_name);
		if (strncmp(path, name, len) == 0 && (strlen(name) == len))
			return child;
	}
	return NULL;
}

struct device_node *__of_find_node_by_full_path(struct device_node *node,
						const char *path)
{
	const char *separator = strchr(path, ':');

	while (node && *path == '/') {
		struct device_node *tmp = node;

		path++; /* Increment past '/' delimiter */
		node = __of_find_node_by_path(node, path);
		of_node_put(tmp);
		path = strchrnul(path, '/');
		if (separator && separator < path)
			break;
	}
	return node;
}

/**
 * of_find_node_opts_by_path - Find a node matching a full OF path
 * @path: Either the full path to match, or if the path does not
 *        start with '/', the name of a property of the /aliases
 *        node (an alias). In the case of an alias, the node
 *        matching the alias' value will be returned.
 * @opts: Address of a pointer into which to store the start of
 *        an options string appended to the end of the path with
 *        a ':' separator.
 *
 * Valid paths:
 *  * /foo/bar	Full path
 *  * foo	Valid alias
 *  * foo/bar	Valid alias + relative path
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_opts_by_path(const char *path, const char **opts)
{
	struct device_node *np = NULL;
	const struct property *pp;
	unsigned long flags;
	const char *separator = strchr(path, ':');

	if (opts)
		*opts = separator ? separator + 1 : NULL;

	if (strcmp(path, "/") == 0)
		return of_node_get(of_root);

	/* The path could begin with an alias */
	if (*path != '/') {
		int len;
		const char *p = separator;

		if (!p)
			p = strchrnul(path, '/');
		len = p - path;

		/* of_aliases must not be NULL */
		if (!of_aliases)
			return NULL;

		for_each_property_of_node(of_aliases, pp) {
			if (strlen(pp->name) == len && !strncmp(pp->name, path, len)) {
				np = of_find_node_by_path(pp->value);
				break;
			}
		}
		if (!np)
			return NULL;
		path = p;
	}

	/* Step down the tree matching path components */
	raw_spin_lock_irqsave(&devtree_lock, flags);
	if (!np)
		np = of_node_get(of_root);
	np = __of_find_node_by_full_path(np, path);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_opts_by_path);
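
/*
 * Usage sketch (illustrative only; "serial0" is a hypothetical alias): a
 * "stdout-path"-style string can name a node by alias and carry options
 * after a ':' separator:
 *
 *	const char *opts;
 *	struct device_node *np;
 *
 *	np = of_find_node_opts_by_path("serial0:115200n8", &opts);
 *	// np resolves via /aliases/serial0, opts points at "115200n8"
 *	of_node_put(np);
 */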

/**
 * of_find_node_by_name - Find a node by its "name" property
 * @from:	The node to start searching from or NULL; the node
 *		you pass will not be searched, only the next one
 *		will. Typically, you pass what the previous call
 *		returned. of_node_put() will be called on @from.
 * @name:	The name string to match against
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_by_name(struct device_node *from,
	const char *name)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (of_node_name_eq(np, name) && of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_name);

/**
 * of_find_node_by_type - Find a node by its "device_type" property
 * @from:	The node to start searching from, or NULL to start searching
 *		the entire device tree. The node you pass will not be
 *		searched, only the next one will; typically, you pass
 *		what the previous call returned. of_node_put() will be
 *		called on from for you.
 * @type:	The type string to match against
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_by_type(struct device_node *from,
	const char *type)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (__of_node_is_type(np, type) && of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_type);

/**
 * of_find_compatible_node - Find a node based on type and one of the
 *                           tokens in its "compatible" property
 * @from:	The node to start searching from or NULL, the node
 *		you pass will not be searched, only the next one
 *		will; typically, you pass what the previous call
 *		returned. of_node_put() will be called on it
 * @type:	The type string to match "device_type" or NULL to ignore
 * @compatible:	The string to match to one of the tokens in the device
 *		"compatible" list.
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_compatible_node(struct device_node *from,
	const char *type, const char *compatible)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (__of_device_is_compatible(np, compatible, type, NULL) &&
		    of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_compatible_node);

/**
 * of_find_node_with_property - Find a node which has a property with
 *                              the given name.
 * @from:	The node to start searching from or NULL, the node
 *		you pass will not be searched, only the next one
 *		will; typically, you pass what the previous call
 *		returned. of_node_put() will be called on it
 * @prop_name:	The name of the property to look for.
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_with_property(struct device_node *from,
	const char *prop_name)
{
	struct device_node *np;
	const struct property *pp;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np) {
		for (pp = np->properties; pp; pp = pp->next) {
			if (of_prop_cmp(pp->name, prop_name) == 0) {
				of_node_get(np);
				goto out;
			}
		}
	}
out:
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_with_property);

static
const struct of_device_id *__of_match_node(const struct of_device_id *matches,
					   const struct device_node *node)
{
	const struct of_device_id *best_match = NULL;
	int score, best_score = 0;

	if (!matches)
		return NULL;

	for (; matches->name[0] || matches->type[0] || matches->compatible[0]; matches++) {
		score = __of_device_is_compatible(node, matches->compatible,
						  matches->type, matches->name);
		if (score > best_score) {
			best_match = matches;
			best_score = score;
		}
	}

	return best_match;
}

/**
 * of_match_node - Tell if a device_node has a matching of_match structure
 * @matches:	array of of device match structures to search in
 * @node:	the of device structure to match against
 *
 * Low level utility function used by device matching.
 */
const struct of_device_id *of_match_node(const struct of_device_id *matches,
					 const struct device_node *node)
{
	const struct of_device_id *match;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	match = __of_match_node(matches, node);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return match;
}
EXPORT_SYMBOL(of_match_node);
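
/*
 * Usage sketch (illustrative only; the compatible strings are hypothetical):
 * a match table is a sentinel-terminated array of struct of_device_id, and
 * of_match_node() returns the best-scoring entry or NULL:
 *
 *	static const struct of_device_id acme_of_match[] = {
 *		{ .compatible = "acme,widget-v2" },
 *		{ .compatible = "acme,widget" },
 *		{ }	// sentinel
 *	};
 *
 *	const struct of_device_id *id = of_match_node(acme_of_match, np);
 *	if (id)
 *		pr_debug("matched %s\n", id->compatible);
 */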

/**
 * of_find_matching_node_and_match - Find a node based on an of_device_id
 *				     match table.
 * @from:	The node to start searching from or NULL, the node
 *		you pass will not be searched, only the next one
 *		will; typically, you pass what the previous call
 *		returned. of_node_put() will be called on it
 * @matches:	array of of device match structures to search in
 * @match:	Updated to point at the matches entry which matched
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_matching_node_and_match(struct device_node *from,
					const struct of_device_id *matches,
					const struct of_device_id **match)
{
	struct device_node *np;
	const struct of_device_id *m;
	unsigned long flags;

	if (match)
		*match = NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np) {
		m = __of_match_node(matches, np);
		if (m && of_node_get(np)) {
			if (match)
				*match = m;
			break;
		}
	}
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_matching_node_and_match);

/**
 * of_alias_from_compatible - Lookup appropriate alias for a device node
 *			      depending on compatible
 * @node:	pointer to a device tree node
 * @alias:	Pointer to buffer that alias value will be copied into
 * @len:	Length of alias value
 *
 * Based on the value of the compatible property, this routine will attempt
 * to choose an appropriate alias value for a particular device tree node.
 * It does this by stripping the manufacturer prefix (as delimited by a ',')
 * from the first entry in the compatible list property.
 *
 * Note: The matching on just the "product" side of the compatible is a relic
 * from I2C and SPI. Please do not add any new user.
 *
 * Return: This routine returns 0 on success, <0 on failure.
 */
int of_alias_from_compatible(const struct device_node *node, char *alias, int len)
{
	const char *compatible, *p;
	int cplen;

	compatible = of_get_property(node, "compatible", &cplen);
	if (!compatible || strlen(compatible) > cplen)
		return -ENODEV;
	p = strchr(compatible, ',');
	strscpy(alias, p ? p + 1 : compatible, len);
	return 0;
}
EXPORT_SYMBOL_GPL(of_alias_from_compatible);

/**
 * of_find_node_by_phandle - Find a node given a phandle
 * @handle:	phandle of the node to find
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_by_phandle(phandle handle)
{
	struct device_node *np = NULL;
	unsigned long flags;
	u32 handle_hash;

	if (!handle)
		return NULL;

	handle_hash = of_phandle_cache_hash(handle);

	raw_spin_lock_irqsave(&devtree_lock, flags);

	if (phandle_cache[handle_hash] &&
	    handle == phandle_cache[handle_hash]->phandle)
		np = phandle_cache[handle_hash];

	if (!np) {
		for_each_of_allnodes(np)
			if (np->phandle == handle &&
			    !of_node_check_flag(np, OF_DETACHED)) {
				phandle_cache[handle_hash] = np;
				break;
			}
	}

	of_node_get(np);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);

void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
{
	int i;
	printk("%s %pOF", msg, args->np);
	for (i = 0; i < args->args_count; i++) {
		const char delim = i ? ',' : ':';

		pr_cont("%c%08x", delim, args->args[i]);
	}
	pr_cont("\n");
}

int of_phandle_iterator_init(struct of_phandle_iterator *it,
		const struct device_node *np,
		const char *list_name,
		const char *cells_name,
		int cell_count)
{
	const __be32 *list;
	int size;

	memset(it, 0, sizeof(*it));

	/*
	 * one of cell_count or cells_name must be provided to determine the
	 * argument length.
	 */
	if (cell_count < 0 && !cells_name)
		return -EINVAL;

	list = of_get_property(np, list_name, &size);
	if (!list)
		return -ENOENT;

	it->cells_name = cells_name;
	it->cell_count = cell_count;
	it->parent = np;
	it->list_end = list + size / sizeof(*list);
	it->phandle_end = list;
	it->cur = list;

	return 0;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_init);

int of_phandle_iterator_next(struct of_phandle_iterator *it)
{
	uint32_t count = 0;

	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	if (!it->cur || it->phandle_end >= it->list_end)
		return -ENOENT;

	it->cur = it->phandle_end;

	/* If phandle is 0, then it is an empty entry with no arguments. */
	it->phandle = be32_to_cpup(it->cur++);

	if (it->phandle) {

		/*
		 * Find the provider node and parse the #*-cells property to
		 * determine the argument length.
		 */
		it->node = of_find_node_by_phandle(it->phandle);

		if (it->cells_name) {
			if (!it->node) {
				pr_err("%pOF: could not find phandle %d\n",
				       it->parent, it->phandle);
				goto err;
			}

			if (of_property_read_u32(it->node, it->cells_name,
						 &count)) {
				/*
				 * If both cell_count and cells_name are given,
				 * fall back to cell_count in absence
				 * of the cells_name property
				 */
				if (it->cell_count >= 0) {
					count = it->cell_count;
				} else {
					pr_err("%pOF: could not get %s for %pOF\n",
					       it->parent,
					       it->cells_name,
					       it->node);
					goto err;
				}
			}
		} else {
			count = it->cell_count;
		}

		/*
		 * Make sure that the arguments actually fit in the remaining
		 * property data length
		 */
		if (it->cur + count > it->list_end) {
			if (it->cells_name)
				pr_err("%pOF: %s = %d found %td\n",
				       it->parent, it->cells_name,
				       count, it->list_end - it->cur);
			else
				pr_err("%pOF: phandle %s needs %d, found %td\n",
				       it->parent, of_node_full_name(it->node),
				       count, it->list_end - it->cur);
			goto err;
		}
	}

	it->phandle_end = it->cur + count;
	it->cur_count = count;

	return 0;

err:
	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_next);

int of_phandle_iterator_args(struct of_phandle_iterator *it,
			     uint32_t *args,
			     int size)
{
	int i, count;

	count = it->cur_count;

	if (WARN_ON(size < count))
		count = size;

	for (i = 0; i < count; i++)
		args[i] = be32_to_cpup(it->cur++);

	return count;
}

int __of_parse_phandle_with_args(const struct device_node *np,
				 const char *list_name,
				 const char *cells_name,
				 int cell_count, int index,
				 struct of_phandle_args *out_args)
{
	struct of_phandle_iterator it;
	int rc, cur_index = 0;

	if (index < 0)
		return -EINVAL;

	/* Loop over the phandles until the requested entry is found */
	of_for_each_phandle(&it, rc, np, list_name, cells_name, cell_count) {
		/*
		 * All of the error cases bail out of the loop, so at
		 * this point, the parsing is successful. If the requested
		 * index matches, then fill the out_args structure and return,
		 * or return -ENOENT for an empty entry.
		 */
		rc = -ENOENT;
		if (cur_index == index) {
			if (!it.phandle)
				goto err;

			if (out_args) {
				int c;

				c = of_phandle_iterator_args(&it,
							     out_args->args,
							     MAX_PHANDLE_ARGS);
				out_args->np = it.node;
				out_args->args_count = c;
			} else {
				of_node_put(it.node);
			}

			/* Found it! return success */
			return 0;
		}

		cur_index++;
	}

	/*
	 * Unlock node before returning result; will be one of:
	 * -ENOENT : index is for empty phandle
	 * -EINVAL : parsing error on data
	 */

err:
	of_node_put(it.node);
	return rc;
}
EXPORT_SYMBOL(__of_parse_phandle_with_args);
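
/*
 * Usage sketch (illustrative only; "example-gpios" and "#gpio-cells" stand
 * in for any phandle-plus-args binding): consumers normally go through the
 * of_parse_phandle_with_args() wrapper in <linux/of.h>, which ends up here:
 *
 *	struct of_phandle_args args;
 *
 *	if (!of_parse_phandle_with_args(np, "example-gpios", "#gpio-cells",
 *					0, &args)) {
 *		// args.np is the provider node (reference held),
 *		// args.args[0..args_count-1] are the specifier cells
 *		of_node_put(args.np);
 *	}
 */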

/**
 * of_parse_phandle_with_args_map() - Find a node pointed by phandle in a list and remap it
 * @np:		pointer to a device tree node containing a list
 * @list_name:	property name that contains a list
 * @stem_name:	stem of property names that specify phandles' arguments count
 * @index:	index of a phandle to parse out
 * @out_args:	optional pointer to output arguments structure (will be filled)
 *
 * This function is useful to parse lists of phandles and their arguments.
 * Returns 0 on success and fills out_args, on error returns appropriate errno
 * value. The difference between this function and of_parse_phandle_with_args()
 * is that this API remaps a phandle if the node the phandle points to has
 * a <@stem_name>-map property.
 *
 * Caller is responsible to call of_node_put() on the returned out_args->np
 * pointer.
 *
 * Example::
 *
 *	phandle1: node1 {
 *		#list-cells = <2>;
 *	};
 *
 *	phandle2: node2 {
 *		#list-cells = <1>;
 *	};
 *
 *	phandle3: node3 {
 *		#list-cells = <1>;
 *		list-map = <0 &phandle2 3>,
 *			   <1 &phandle2 2>,
 *			   <2 &phandle1 5 1>;
 *		list-map-mask = <0x3>;
 *	};
 *
 *	node4 {
 *		list = <&phandle1 1 2 &phandle3 0>;
 *	};
 *
 * To get a device_node of the ``node2`` node you may call this:
 * of_parse_phandle_with_args_map(node4, "list", "list", 1, &args);
 */
int of_parse_phandle_with_args_map(const struct device_node *np,
				   const char *list_name,
				   const char *stem_name,
				   int index, struct of_phandle_args *out_args)
{
	char *cells_name __free(kfree) = kasprintf(GFP_KERNEL, "#%s-cells", stem_name);
	char *map_name __free(kfree) = kasprintf(GFP_KERNEL, "%s-map", stem_name);
	char *mask_name __free(kfree) = kasprintf(GFP_KERNEL, "%s-map-mask", stem_name);
	char *pass_name __free(kfree) = kasprintf(GFP_KERNEL, "%s-map-pass-thru", stem_name);
	struct device_node *cur, *new = NULL;
	const __be32 *map, *mask, *pass;
	static const __be32 dummy_mask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) };
	static const __be32 dummy_pass[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(0) };
	__be32 initial_match_array[MAX_PHANDLE_ARGS];
	const __be32 *match_array = initial_match_array;
	int i, ret, map_len, match;
	u32 list_size, new_size;

	if (index < 0)
		return -EINVAL;

	if (!cells_name || !map_name || !mask_name || !pass_name)
		return -ENOMEM;

	ret = __of_parse_phandle_with_args(np, list_name, cells_name, -1, index,
					   out_args);
	if (ret)
		return ret;

	/* Get the #<list>-cells property */
	cur = out_args->np;
	ret = of_property_read_u32(cur, cells_name, &list_size);
	if (ret < 0)
		goto put;

	/* Precalculate the match array - this simplifies match loop */
	for (i = 0; i < list_size; i++)
		initial_match_array[i] = cpu_to_be32(out_args->args[i]);

	ret = -EINVAL;
	while (cur) {
		/* Get the <list>-map property */
		map = of_get_property(cur, map_name, &map_len);
		if (!map) {
			return 0;
		}
		map_len /= sizeof(u32);

		/* Get the <list>-map-mask property (optional) */
		mask = of_get_property(cur, mask_name, NULL);
		if (!mask)
			mask = dummy_mask;
		/* Iterate through <list>-map property */
		match = 0;
		while (map_len > (list_size + 1) && !match) {
			/* Compare specifiers */
			match = 1;
			for (i = 0; i < list_size; i++, map_len--)
				match &= !((match_array[i] ^ *map++) & mask[i]);

			of_node_put(new);
			new = of_find_node_by_phandle(be32_to_cpup(map));
			map++;
			map_len--;

			/* Check if not found */
			if (!new)
				goto put;

			if (!of_device_is_available(new))
				match = 0;

			ret = of_property_read_u32(new, cells_name, &new_size);
			if (ret)
				goto put;

			/* Check for malformed properties */
			if (WARN_ON(new_size > MAX_PHANDLE_ARGS))
				goto put;
			if (map_len < new_size)
				goto put;

			/* Move forward by new node's #<list>-cells amount */
			map += new_size;
			map_len -= new_size;
		}
		if (!match)
			goto put;

		/* Get the <list>-map-pass-thru property (optional) */
		pass = of_get_property(cur, pass_name, NULL);
		if (!pass)
			pass = dummy_pass;

		/*
		 * Successfully parsed a <list>-map translation; copy new
		 * specifier into the out_args structure, keeping the
		 * bits specified in <list>-map-pass-thru.
		 */
		match_array = map - new_size;
		for (i = 0; i < new_size; i++) {
			__be32 val = *(map - new_size + i);

			if (i < list_size) {
				val &= ~pass[i];
				val |= cpu_to_be32(out_args->args[i]) & pass[i];
			}

			out_args->args[i] = be32_to_cpu(val);
		}
		out_args->args_count = list_size = new_size;
		/* Iterate again with new provider */
		out_args->np = new;
		of_node_put(cur);
		cur = new;
		new = NULL;
	}
put:
	of_node_put(cur);
	of_node_put(new);
	return ret;
}
EXPORT_SYMBOL(of_parse_phandle_with_args_map);

/**
 * of_count_phandle_with_args() - Find the number of phandles references in a property
 * @np:		pointer to a device tree node containing a list
 * @list_name:	property name that contains a list
 * @cells_name:	property name that specifies phandles' arguments count
 *
 * Return: The number of phandle + argument tuples within a property. It
 * is a typical pattern to encode a list of phandle and variable
 * arguments into a single property. The number of arguments is encoded
 * by a property in the phandle-target node. For example, a gpios
 * property would contain a list of GPIO specifiers consisting of a
 * phandle and 1 or more arguments. The number of arguments is
 * determined by the #gpio-cells property in the node pointed to by the
 * phandle.
 */
int of_count_phandle_with_args(const struct device_node *np, const char *list_name,
			       const char *cells_name)
{
	struct of_phandle_iterator it;
	int rc, cur_index = 0;

	/*
	 * If cells_name is NULL we assume a cell count of 0. This makes
	 * counting the phandles trivial as each 32bit word in the list is a
	 * phandle and there are no arguments to consider. So we don't iterate
	 * through the list but just use the length to determine the phandle
	 * count.
	 */
	if (!cells_name) {
		const __be32 *list;
		int size;

		list = of_get_property(np, list_name, &size);
		if (!list)
			return -ENOENT;

		return size / sizeof(*list);
	}

	rc = of_phandle_iterator_init(&it, np, list_name, cells_name, -1);
	if (rc)
		return rc;

	while ((rc = of_phandle_iterator_next(&it)) == 0)
		cur_index += 1;

	if (rc != -ENOENT)
		return rc;

	return cur_index;
}
EXPORT_SYMBOL(of_count_phandle_with_args);

static struct property *__of_remove_property_from_list(struct property **list, struct property *prop)
{
	struct property **next;

	for (next = list; *next; next = &(*next)->next) {
		if (*next == prop) {
			*next = prop->next;
			prop->next = NULL;
			return prop;
		}
	}
	return NULL;
}

/**
 * __of_add_property - Add a property to a node without lock operations
 * @np:		Caller's Device Node
 * @prop:	Property to add
 */
int __of_add_property(struct device_node *np, struct property *prop)
{
	int rc = 0;
	unsigned long flags;
	struct property **next;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	__of_remove_property_from_list(&np->deadprops, prop);

	prop->next = NULL;
	next = &np->properties;
	while (*next) {
		if (strcmp(prop->name, (*next)->name) == 0) {
			/* duplicate ! don't insert it */
			rc = -EEXIST;
			goto out_unlock;
		}
		next = &(*next)->next;
	}
	*next = prop;

out_unlock:
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	if (rc)
		return rc;

	__of_add_property_sysfs(np, prop);
	return 0;
}

/**
 * of_add_property - Add a property to a node
 * @np:		Caller's Device Node
 * @prop:	Property to add
 */
int of_add_property(struct device_node *np, struct property *prop)
{
	int rc;

	mutex_lock(&of_mutex);
	rc = __of_add_property(np, prop);
	mutex_unlock(&of_mutex);

	if (!rc)
		of_property_notify(OF_RECONFIG_ADD_PROPERTY, np, prop, NULL);

	return rc;
}
EXPORT_SYMBOL_GPL(of_add_property);

int __of_remove_property(struct device_node *np, struct property *prop)
{
	unsigned long flags;
	int rc = -ENODEV;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	if (__of_remove_property_from_list(&np->properties, prop)) {
		/* Found the property, add it to deadprops list */
		prop->next = np->deadprops;
		np->deadprops = prop;
		rc = 0;
	}

	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	if (rc)
		return rc;

	__of_remove_property_sysfs(np, prop);
	return 0;
}

/**
 * of_remove_property - Remove a property from a node.
 * @np:		Caller's Device Node
 * @prop:	Property to remove
 *
 * Note that we don't actually remove it, since we have given out
 * who-knows-how-many pointers to the data using get-property.
 * Instead we just move the property to the "dead properties"
 * list, so it won't be found any more.
 */
int of_remove_property(struct device_node *np, struct property *prop)
{
	int rc;

	if (!prop)
		return -ENODEV;

	mutex_lock(&of_mutex);
	rc = __of_remove_property(np, prop);
	mutex_unlock(&of_mutex);

	if (!rc)
		of_property_notify(OF_RECONFIG_REMOVE_PROPERTY, np, prop, NULL);

	return rc;
}
EXPORT_SYMBOL_GPL(of_remove_property);
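
/*
 * Usage sketch (illustrative only; the property name and value are
 * hypothetical): dynamic callers build a struct property and hand it to
 * of_add_property(); the property memory must stay valid for the lifetime
 * of the node, which is why it is only moved to "deadprops" on removal:
 *
 *	static __be32 example_val = cpu_to_be32(42);
 *	static struct property example_prop = {
 *		.name	= "example-cells",
 *		.value	= &example_val,
 *		.length	= sizeof(example_val),
 *	};
 *
 *	if (of_add_property(np, &example_prop))
 *		pr_warn("could not add property\n");
 */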

int __of_update_property(struct device_node *np, struct property *newprop,
		struct property **oldpropp)
{
	struct property **next, *oldprop;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	__of_remove_property_from_list(&np->deadprops, newprop);

	for (next = &np->properties; *next; next = &(*next)->next) {
		if (of_prop_cmp((*next)->name, newprop->name) == 0)
			break;
	}
	*oldpropp = oldprop = *next;

	if (oldprop) {
		/* replace the node */
		newprop->next = oldprop->next;
		*next = newprop;
		oldprop->next = np->deadprops;
		np->deadprops = oldprop;
	} else {
		/* new node */
		newprop->next = NULL;
		*next = newprop;
	}

	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	__of_update_property_sysfs(np, newprop, oldprop);

	return 0;
}

/*
 * of_update_property - Update a property in a node, if the property does
 * not exist, add it.
 *
 * Note that we don't actually remove it, since we have given out
 * who-knows-how-many pointers to the data using get-property.
 * Instead we just move the property to the "dead properties" list,
 * and add the new property to the property list
 */
int of_update_property(struct device_node *np, struct property *newprop)
{
	struct property *oldprop;
	int rc;

	if (!newprop->name)
		return -EINVAL;

	mutex_lock(&of_mutex);
	rc = __of_update_property(np, newprop, &oldprop);
	mutex_unlock(&of_mutex);

	if (!rc)
		of_property_notify(OF_RECONFIG_UPDATE_PROPERTY, np, newprop, oldprop);

	return rc;
}

static void of_alias_add(struct alias_prop *ap, struct device_node *np,
			 int id, const char *stem, int stem_len)
{
	ap->np = np;
	ap->id = id;
	strscpy(ap->stem, stem, stem_len + 1);
	list_add_tail(&ap->link, &aliases_lookup);
	pr_debug("adding DT alias:%s: stem=%s id=%i node=%pOF\n",
		 ap->alias, ap->stem, ap->id, np);
}

/**
 * of_alias_scan - Scan all properties of the 'aliases' node
 * @dt_alloc:	An allocator that provides a virtual address to memory
 *		for storing the resulting tree
 *
 * The function scans all the properties of the 'aliases' node and populates
 * the global lookup table with the properties.
 */
void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
{
	const struct property *pp;

	of_aliases = of_find_node_by_path("/aliases");
	of_chosen = of_find_node_by_path("/chosen");
	if (of_chosen == NULL)
		of_chosen = of_find_node_by_path("/chosen@0");

	if (of_chosen) {
		/* linux,stdout-path and /aliases/stdout are for legacy compatibility */
		const char *name = NULL;

		if (of_property_read_string(of_chosen, "stdout-path", &name))
			of_property_read_string(of_chosen, "linux,stdout-path",
						&name);
		if (IS_ENABLED(CONFIG_PPC) && !name)
			of_property_read_string(of_aliases, "stdout", &name);
		if (name)
			of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
		if (of_stdout)
			of_stdout->fwnode.flags |= FWNODE_FLAG_BEST_EFFORT;
	}

	if (!of_aliases)
		return;

	for_each_property_of_node(of_aliases, pp) {
		const char *start = pp->name;
		const char *end = start + strlen(start);
		struct device_node *np;
		struct alias_prop *ap;
		int id, len;

		/* Skip those we do not want to proceed */
		if (!strcmp(pp->name, "name") ||
		    !strcmp(pp->name, "phandle") ||
		    !strcmp(pp->name, "linux,phandle"))
			continue;

		np = of_find_node_by_path(pp->value);
		if (!np)
			continue;

		/* walk the alias backwards to extract the id and work out
		 * the 'stem' string */
		while (isdigit(*(end-1)) && end > start)
			end--;
		len = end - start;

		if (kstrtoint(end, 10, &id) < 0)
			continue;

		/* Allocate an alias_prop with enough space for the stem */
		ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap));
		if (!ap)
			continue;
		memset(ap, 0, sizeof(*ap) + len + 1);
		ap->alias = start;
		of_alias_add(ap, np, id, start, len);
	}
}

/**
 * of_alias_get_id - Get alias id for the given device_node
 * @np:		Pointer to the given device_node
 * @stem:	Alias stem of the given device_node
 *
 * The function traverses the lookup table to get the alias id for the given
 * device_node and alias stem.
 *
 * Return: The alias id if found.
 */
int of_alias_get_id(const struct device_node *np, const char *stem)
{
	struct alias_prop *app;
	int id = -ENODEV;

	mutex_lock(&of_mutex);
	list_for_each_entry(app, &aliases_lookup, link) {
		if (strcmp(app->stem, stem) != 0)
			continue;

		if (np == app->np) {
			id = app->id;
			break;
		}
	}
	mutex_unlock(&of_mutex);

	return id;
}
EXPORT_SYMBOL_GPL(of_alias_get_id);

/**
 * of_alias_get_highest_id - Get highest alias id for the given stem
 * @stem:	Alias stem to be examined
 *
 * The function traverses the lookup table to get the highest alias id for the
 * given alias stem. It returns the alias id if found.
 */
int of_alias_get_highest_id(const char *stem)
{
	struct alias_prop *app;
	int id = -ENODEV;

	mutex_lock(&of_mutex);
	list_for_each_entry(app, &aliases_lookup, link) {
		if (strcmp(app->stem, stem) != 0)
			continue;

		if (app->id > id)
			id = app->id;
	}
	mutex_unlock(&of_mutex);

	return id;
}
EXPORT_SYMBOL_GPL(of_alias_get_highest_id);
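
/*
 * Usage sketch (illustrative only): given a hypothetical /aliases node
 *
 *	aliases {
 *		serial0 = &uart_a;
 *		serial1 = &uart_b;
 *	};
 *
 * a UART driver can recover its stable index from the node it bound to:
 *
 *	int id = of_alias_get_id(np, "serial");	// 0 or 1, or -ENODEV
 */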

/**
 * of_console_check() - Test and setup console for DT setup
 * @dn: Pointer to device node
 * @name: Name to use for preferred console without index. ex. "ttyS"
 * @index: Index to use for preferred console.
 *
 * Check if the given device node matches the stdout-path property in the
 * /chosen node. If it does then register it as the preferred console.
 *
 * Return: TRUE if console successfully setup. Otherwise return FALSE.
 */
bool of_console_check(const struct device_node *dn, char *name, int index)
{
	if (!dn || dn != of_stdout || console_set_on_cmdline)
		return false;

	/*
	 * XXX: cast `options' to char pointer to suppress compilation
	 * warnings: printk, UART and console drivers expect char pointer.
	 */
	return !add_preferred_console(name, index, (char *)of_stdout_options);
}
EXPORT_SYMBOL_GPL(of_console_check);

/**
 * of_find_next_cache_node - Find a node's subsidiary cache
 * @np:	node of type "cpu" or "cache"
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done. Caller should hold a reference
 * to np.
 */
struct device_node *of_find_next_cache_node(const struct device_node *np)
{
	struct device_node *child, *cache_node;

	cache_node = of_parse_phandle(np, "l2-cache", 0);
	if (!cache_node)
		cache_node = of_parse_phandle(np, "next-level-cache", 0);

	if (cache_node)
		return cache_node;

	/* OF on pmac has nodes instead of properties named "l2-cache"
	 * beneath CPU nodes.
	 */
	if (IS_ENABLED(CONFIG_PPC_PMAC) && of_node_is_type(np, "cpu"))
		for_each_child_of_node(np, child)
			if (of_node_is_type(child, "cache"))
				return child;

	return NULL;
}

/**
 * of_find_last_cache_level - Find the level at which the last cache is
 *		present for the given logical cpu
 *
 * @cpu: cpu number (logical index) for which the last cache level is needed
 *
 * Return: The level at which the last cache is present. It is exactly the
 * same as the total number of cache levels for the given logical cpu.
 */
int of_find_last_cache_level(unsigned int cpu)
{
	u32 cache_level = 0;
	struct device_node *prev = NULL, *np = of_cpu_device_node_get(cpu);

	while (np) {
		of_node_put(prev);
		prev = np;
		np = of_find_next_cache_node(np);
	}

	of_property_read_u32(prev, "cache-level", &cache_level);
	of_node_put(prev);

	return cache_level;
}

/**
 * of_map_id - Translate an ID through a downstream mapping.
 * @np: root complex device node.
 * @id: device ID to map.
 * @map_name: property name of the map to use.
 * @map_mask_name: optional property name of the mask to use.
 * @target: optional pointer to a target device node.
 * @id_out: optional pointer to receive the translated ID.
 *
 * Given a device ID, look up the appropriate implementation-defined
 * platform ID and/or the target device which receives transactions on that
 * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
 * @id_out may be NULL if only the other is required. If @target points to
 * a non-NULL device node pointer, only entries targeting that node will be
 * matched; if it points to a NULL value, it will receive the device node of
 * the first matching target phandle, with a reference held.
 *
 * Return: 0 on success or a standard error code on failure.
 */
int of_map_id(const struct device_node *np, u32 id,
	      const char *map_name, const char *map_mask_name,
	      struct device_node **target, u32 *id_out)
{
	u32 map_mask, masked_id;
	int map_len;
	const __be32 *map = NULL;

	if (!np || !map_name || (!target && !id_out))
		return -EINVAL;

	map = of_get_property(np, map_name, &map_len);
	if (!map) {
		if (target)
			return -ENODEV;
		/* Otherwise, no map implies no translation */
		*id_out = id;
		return 0;
	}

	if (!map_len || map_len % (4 * sizeof(*map))) {
		pr_err("%pOF: Error: Bad %s length: %d\n", np,
			map_name, map_len);
		return -EINVAL;
	}

	/* The default is to select all bits. */
	map_mask = 0xffffffff;

	/*
	 * Can be overridden by "{iommu,msi}-map-mask" property.
	 * If of_property_read_u32() fails, the default is used.
	 */
	if (map_mask_name)
		of_property_read_u32(np, map_mask_name, &map_mask);

	masked_id = map_mask & id;
	for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
		struct device_node *phandle_node;
		u32 id_base = be32_to_cpup(map + 0);
		u32 phandle = be32_to_cpup(map + 1);
		u32 out_base = be32_to_cpup(map + 2);
		u32 id_len = be32_to_cpup(map + 3);

		if (id_base & ~map_mask) {
			pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores id-base (0x%x)\n",
				np, map_name, map_name,
				map_mask, id_base);
			return -EFAULT;
		}

		if (masked_id < id_base || masked_id >= id_base + id_len)
			continue;

		phandle_node = of_find_node_by_phandle(phandle);
		if (!phandle_node)
			return -ENODEV;

		if (target) {
			if (*target)
				of_node_put(phandle_node);
			else
				*target = phandle_node;

			if (*target != phandle_node)
				continue;
		}

		if (id_out)
			*id_out = masked_id - id_base + out_base;

		pr_debug("%pOF: %s, using mask %08x, id-base: %08x, out-base: %08x, length: %08x, id: %08x -> %08x\n",
			np, map_name, map_mask, id_base, out_base,
			id_len, id, masked_id - id_base + out_base);
		return 0;
	}

	pr_info("%pOF: no %s translation for id 0x%x on %pOF\n", np, map_name,
		id, target && *target ? *target : NULL);

	/* Bypasses translation */
	if (id_out)
		*id_out = id;
	return 0;
}
EXPORT_SYMBOL_GPL(of_map_id);
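
/*
 * Usage sketch (illustrative only; the map entry and ID are hypothetical):
 * for a PCI requester ID run through an "msi-map" such as
 *
 *	msi-map = <0x0000 &its 0x0000 0x10000>;
 *
 * a caller could translate RID 0xf8 like this:
 *
 *	struct device_node *msi_np = NULL;
 *	u32 msi_spec;
 *
 *	if (!of_map_id(bridge_np, 0xf8, "msi-map", "msi-map-mask",
 *		       &msi_np, &msi_spec)) {
 *		// msi_np is the MSI controller node (reference held),
 *		// msi_spec is the translated ID (0xf8 with this map)
 *		of_node_put(msi_np);
 *	}
 */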