1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Procedures for creating, accessing and interpreting the device tree. 4 * 5 * Paul Mackerras August 1996. 6 * Copyright (C) 1996-2005 Paul Mackerras. 7 * 8 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner. 9 * {engebret|bergner}@us.ibm.com 10 * 11 * Adapted for sparc and sparc64 by David S. Miller davem@davemloft.net 12 * 13 * Reconsolidated from arch/x/kernel/prom.c by Stephen Rothwell and 14 * Grant Likely. 15 */ 16 17 #define pr_fmt(fmt) "OF: " fmt 18 19 #include <linux/cleanup.h> 20 #include <linux/console.h> 21 #include <linux/ctype.h> 22 #include <linux/cpu.h> 23 #include <linux/module.h> 24 #include <linux/of.h> 25 #include <linux/of_device.h> 26 #include <linux/of_graph.h> 27 #include <linux/spinlock.h> 28 #include <linux/slab.h> 29 #include <linux/string.h> 30 #include <linux/proc_fs.h> 31 32 #include "of_private.h" 33 34 LIST_HEAD(aliases_lookup); 35 36 struct device_node *of_root; 37 EXPORT_SYMBOL(of_root); 38 struct device_node *of_chosen; 39 EXPORT_SYMBOL(of_chosen); 40 struct device_node *of_aliases; 41 struct device_node *of_stdout; 42 static const char *of_stdout_options; 43 44 struct kset *of_kset; 45 46 /* 47 * Used to protect the of_aliases, to hold off addition of nodes to sysfs. 48 * This mutex must be held whenever modifications are being made to the 49 * device tree. The of_{attach,detach}_node() and 50 * of_{add,remove,update}_property() helpers make sure this happens. 51 */ 52 DEFINE_MUTEX(of_mutex); 53 54 /* use when traversing tree through the child, sibling, 55 * or parent members of struct device_node. 
 */
DEFINE_RAW_SPINLOCK(devtree_lock);

/*
 * Compare @np's node name against @name. The comparison uses the basename
 * of the node's full path and ignores any "@unit-address" suffix.
 */
bool of_node_name_eq(const struct device_node *np, const char *name)
{
	const char *node_name;
	size_t len;

	if (!np)
		return false;

	node_name = kbasename(np->full_name);
	/* Compare only up to (not including) the '@' unit address, if any */
	len = strchrnul(node_name, '@') - node_name;

	return (strlen(name) == len) && (strncmp(node_name, name, len) == 0);
}
EXPORT_SYMBOL(of_node_name_eq);

/* True if the basename of @np's node name begins with @prefix */
bool of_node_name_prefix(const struct device_node *np, const char *prefix)
{
	if (!np)
		return false;

	return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0;
}
EXPORT_SYMBOL(of_node_name_prefix);

/* True if @np has a "device_type" property that equals @type */
static bool __of_node_is_type(const struct device_node *np, const char *type)
{
	const char *match = __of_get_property(np, "device_type", NULL);

	/* __of_get_property() tolerates np == NULL, hence the late np check */
	return np && match && type && !strcmp(match, type);
}

/*
 * Platforms known to rely on the deprecated default
 * #address-cells/#size-cells behaviour; they must not trigger the
 * WARN_ONCE() in the cell lookups below.
 */
#define EXCLUDED_DEFAULT_CELLS_PLATFORMS ( \
	IS_ENABLED(CONFIG_SPARC) || \
	of_find_compatible_node(NULL, NULL, "coreboot") \
)

int of_bus_n_addr_cells(struct device_node *np)
{
	u32 cells;

	/* Walk towards the root until a "#address-cells" property is found */
	for (; np; np = np->parent) {
		if (!of_property_read_u32(np, "#address-cells", &cells))
			return cells;
		/*
		 * Default root value and walking parent nodes for "#address-cells"
		 * is deprecated. Any platforms which hit this warning should
		 * be added to the excluded list.
		 */
		WARN_ONCE(!EXCLUDED_DEFAULT_CELLS_PLATFORMS,
			  "Missing '#address-cells' in %pOF\n", np);
	}
	return OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
}

int of_n_addr_cells(struct device_node *np)
{
	/* "#address-cells" describes a node's children, so start at the parent */
	if (np->parent)
		np = np->parent;

	return of_bus_n_addr_cells(np);
}
EXPORT_SYMBOL(of_n_addr_cells);

int of_bus_n_size_cells(struct device_node *np)
{
	u32 cells;

	/* Walk towards the root until a "#size-cells" property is found */
	for (; np; np = np->parent) {
		if (!of_property_read_u32(np, "#size-cells", &cells))
			return cells;
		/*
		 * Default root value and walking parent nodes for "#size-cells"
		 * is deprecated. Any platforms which hit this warning should
		 * be added to the excluded list.
		 */
		WARN_ONCE(!EXCLUDED_DEFAULT_CELLS_PLATFORMS,
			  "Missing '#size-cells' in %pOF\n", np);
	}
	return OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
}

int of_n_size_cells(struct device_node *np)
{
	/* "#size-cells" describes a node's children, so start at the parent */
	if (np->parent)
		np = np->parent;

	return of_bus_n_size_cells(np);
}
EXPORT_SYMBOL(of_n_size_cells);

#ifdef CONFIG_NUMA
/* Weak default; architectures may override to map a node to a NUMA node id */
int __weak of_node_to_nid(struct device_node *np)
{
	return NUMA_NO_NODE;
}
#endif

#define OF_PHANDLE_CACHE_BITS	7
#define OF_PHANDLE_CACHE_SZ	BIT(OF_PHANDLE_CACHE_BITS)

/* Direct-mapped cache of recently resolved phandle -> node lookups */
static struct device_node *phandle_cache[OF_PHANDLE_CACHE_SZ];

static u32 of_phandle_cache_hash(phandle handle)
{
	return hash_32(handle, OF_PHANDLE_CACHE_BITS);
}

/*
 * Caller must hold devtree_lock.
 */
void __of_phandle_cache_inv_entry(phandle handle)
{
	u32 handle_hash;
	struct device_node *np;

	if (!handle)
		return;

	handle_hash = of_phandle_cache_hash(handle);

	/* Only invalidate if the slot still refers to this phandle */
	np = phandle_cache[handle_hash];
	if (np && handle == np->phandle)
		phandle_cache[handle_hash] = NULL;
}

void __init of_core_init(void)
{
	struct device_node *np;

	of_platform_register_reconfig_notifier();

	/* Create the kset, and register existing nodes */
	mutex_lock(&of_mutex);
	of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
	if (!of_kset) {
		mutex_unlock(&of_mutex);
		pr_err("failed to register existing nodes\n");
		return;
	}
	for_each_of_allnodes(np) {
		__of_attach_node_sysfs(np);
		/* Pre-warm the phandle cache; first node for a hash slot wins */
		if (np->phandle && !phandle_cache[of_phandle_cache_hash(np->phandle)])
			phandle_cache[of_phandle_cache_hash(np->phandle)] = np;
	}
	mutex_unlock(&of_mutex);

	/* Symlink in /proc as required by userspace ABI */
	if (of_root)
		proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
}

/*
 * Look up property @name on @np. Lockless; the caller must hold
 * devtree_lock. If found and @lenp is non-NULL, the property length is
 * stored through it.
 */
static struct property *__of_find_property(const struct device_node *np,
					   const char *name, int *lenp)
{
	struct property *pp;

	if (!np)
		return NULL;

	for (pp = np->properties; pp; pp = pp->next) {
		if (of_prop_cmp(pp->name, name) == 0) {
			if (lenp)
				*lenp = pp->length;
			break;
		}
	}

	return pp;
}

/* Locked wrapper around __of_find_property() */
struct property *of_find_property(const struct device_node *np,
				  const char *name,
				  int *lenp)
{
	struct property *pp;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	pp = __of_find_property(np, name, lenp);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	return pp;
}
EXPORT_SYMBOL(of_find_property);

/*
 * Depth-first step through the whole tree: child first, then sibling,
 * then the sibling of the nearest ancestor that has one.
 * Caller must hold devtree_lock.
 */
struct device_node *__of_find_all_nodes(struct device_node *prev)
{
	struct device_node *np;
	if (!prev) {
		np = of_root;
	} else if (prev->child) {
		np = prev->child;
	} else {
		/* Walk back up looking for a sibling, or the end of the structure */
		np = prev;
		while (np->parent && !np->sibling)
			np = np->parent;
		np = np->sibling; /* Might be null at the end of the tree */
	}
	return np;
}

/**
 * of_find_all_nodes - Get next node in global list
 * @prev:	Previous node or NULL to start iteration
 *		of_node_put() will be called on it
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_all_nodes(struct device_node *prev)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	np = __of_find_all_nodes(prev);
	of_node_get(np);
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_all_nodes);

/*
 * Find a property with a given name for a given node
 * and return the value. Lockless; caller must hold devtree_lock.
 */
const void *__of_get_property(const struct device_node *np,
			      const char *name, int *lenp)
{
	const struct property *pp = __of_find_property(np, name, lenp);

	return pp ? pp->value : NULL;
}

/*
 * Find a property with a given name for a given node
 * and return the value.
 */
const void *of_get_property(const struct device_node *np, const char *name,
			    int *lenp)
{
	const struct property *pp = of_find_property(np, name, lenp);

	return pp ? pp->value : NULL;
}
EXPORT_SYMBOL(of_get_property);

/**
 * __of_device_is_compatible() - Check if the node matches given constraints
 * @device: pointer to node
 * @compat: required compatible string, NULL or "" for any match
 * @type: required device_type value, NULL or "" for any match
 * @name: required node name, NULL or "" for any match
 *
 * Checks if the given @compat, @type and @name strings match the
 * properties of the given @device. A constraint can be skipped by
 * passing NULL or an empty string as the constraint.
 *
 * Returns 0 for no match, and a positive integer on match. The return
 * value is a relative score with larger values indicating better
 * matches. The score is weighted for the most specific compatible value
 * to get the highest score. Matching type is next, followed by matching
 * name. Practically speaking, this results in the following priority
 * order for matches:
 *
 * 1. specific compatible && type && name
 * 2. specific compatible && type
 * 3. specific compatible && name
 * 4. specific compatible
 * 5. general compatible && type && name
 * 6. general compatible && type
 * 7. general compatible && name
 * 8. general compatible
 * 9. type && name
 * 10. type
 * 11. name
 */
static int __of_device_is_compatible(const struct device_node *device,
				     const char *compat, const char *type, const char *name)
{
	const struct property *prop;
	const char *cp;
	int index = 0, score = 0;

	/* Compatible match has highest priority */
	if (compat && compat[0]) {
		prop = __of_find_property(device, "compatible", NULL);
		for (cp = of_prop_next_string(prop, NULL); cp;
		     cp = of_prop_next_string(prop, cp), index++) {
			if (of_compat_cmp(cp, compat, strlen(compat)) == 0) {
				/* Earlier entries in "compatible" score higher */
				score = INT_MAX/2 - (index << 2);
				break;
			}
		}
		if (!score)
			return 0;
	}

	/* Matching type is better than matching name */
	if (type && type[0]) {
		if (!__of_node_is_type(device, type))
			return 0;
		score += 2;
	}

	/* Matching name is a bit better than not */
	if (name && name[0]) {
		if (!of_node_name_eq(device, name))
			return 0;
		score++;
	}

	return score;
}

/** Checks if the given "compat" string matches one of the strings in
 * the device's "compatible" property
 */
int of_device_is_compatible(const struct device_node *device,
			    const char *compat)
{
	unsigned long flags;
	int res;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	res = __of_device_is_compatible(device, compat, NULL, NULL);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return res;
}
EXPORT_SYMBOL(of_device_is_compatible);

/** Checks if the device is compatible with any of the entries in
 * a NULL terminated array of strings. Returns the best match
 * score or 0.
395 */ 396 int of_device_compatible_match(const struct device_node *device, 397 const char *const *compat) 398 { 399 unsigned int tmp, score = 0; 400 401 if (!compat) 402 return 0; 403 404 while (*compat) { 405 tmp = of_device_is_compatible(device, *compat); 406 if (tmp > score) 407 score = tmp; 408 compat++; 409 } 410 411 return score; 412 } 413 EXPORT_SYMBOL_GPL(of_device_compatible_match); 414 415 /** 416 * of_machine_compatible_match - Test root of device tree against a compatible array 417 * @compats: NULL terminated array of compatible strings to look for in root node's compatible property. 418 * 419 * Returns true if the root node has any of the given compatible values in its 420 * compatible property. 421 */ 422 bool of_machine_compatible_match(const char *const *compats) 423 { 424 struct device_node *root; 425 int rc = 0; 426 427 root = of_find_node_by_path("/"); 428 if (root) { 429 rc = of_device_compatible_match(root, compats); 430 of_node_put(root); 431 } 432 433 return rc != 0; 434 } 435 EXPORT_SYMBOL(of_machine_compatible_match); 436 437 /** 438 * of_machine_get_match - Test root of device tree against an of_device_id array 439 * @matches: NULL terminated array of of_device_id match structures to search in 440 * 441 * Returns matched entry or NULL 442 */ 443 const struct of_device_id *of_machine_get_match(const struct of_device_id *matches) 444 { 445 struct device_node *root; 446 const struct of_device_id *match = NULL; 447 448 root = of_find_node_by_path("/"); 449 if (root) { 450 match = of_match_node(matches, root); 451 of_node_put(root); 452 } 453 454 return match; 455 } 456 EXPORT_SYMBOL(of_machine_get_match); 457 458 /** 459 * of_machine_get_match_data - Tell if root of device tree has a matching of_match structure 460 * @matches: NULL terminated array of of_device_id match structures to search in 461 * 462 * Returns data associated with matched entry or NULL 463 */ 464 const void *of_machine_get_match_data(const struct of_device_id *matches) 465 
{ 466 const struct of_device_id *match; 467 468 match = of_machine_get_match(matches); 469 if (!match) 470 return NULL; 471 472 return match->data; 473 } 474 EXPORT_SYMBOL(of_machine_get_match_data); 475 476 static bool __of_device_is_status(const struct device_node *device, 477 const char * const*strings) 478 { 479 const char *status; 480 int statlen; 481 482 if (!device) 483 return false; 484 485 status = __of_get_property(device, "status", &statlen); 486 if (status == NULL) 487 return false; 488 489 if (statlen > 0) { 490 while (*strings) { 491 unsigned int len = strlen(*strings); 492 493 if ((*strings)[len - 1] == '-') { 494 if (!strncmp(status, *strings, len)) 495 return true; 496 } else { 497 if (!strcmp(status, *strings)) 498 return true; 499 } 500 strings++; 501 } 502 } 503 504 return false; 505 } 506 507 /** 508 * __of_device_is_available - check if a device is available for use 509 * 510 * @device: Node to check for availability, with locks already held 511 * 512 * Return: True if the status property is absent or set to "okay" or "ok", 513 * false otherwise 514 */ 515 static bool __of_device_is_available(const struct device_node *device) 516 { 517 static const char * const ok[] = {"okay", "ok", NULL}; 518 519 if (!device) 520 return false; 521 522 return !__of_get_property(device, "status", NULL) || 523 __of_device_is_status(device, ok); 524 } 525 526 /** 527 * __of_device_is_reserved - check if a device is reserved 528 * 529 * @device: Node to check for availability, with locks already held 530 * 531 * Return: True if the status property is set to "reserved", false otherwise 532 */ 533 static bool __of_device_is_reserved(const struct device_node *device) 534 { 535 static const char * const reserved[] = {"reserved", NULL}; 536 537 return __of_device_is_status(device, reserved); 538 } 539 540 /** 541 * of_device_is_available - check if a device is available for use 542 * 543 * @device: Node to check for availability 544 * 545 * Return: True if the status 
property is absent or set to "okay" or "ok", 546 * false otherwise 547 */ 548 bool of_device_is_available(const struct device_node *device) 549 { 550 unsigned long flags; 551 bool res; 552 553 raw_spin_lock_irqsave(&devtree_lock, flags); 554 res = __of_device_is_available(device); 555 raw_spin_unlock_irqrestore(&devtree_lock, flags); 556 return res; 557 558 } 559 EXPORT_SYMBOL(of_device_is_available); 560 561 /** 562 * __of_device_is_fail - check if a device has status "fail" or "fail-..." 563 * 564 * @device: Node to check status for, with locks already held 565 * 566 * Return: True if the status property is set to "fail" or "fail-..." (for any 567 * error code suffix), false otherwise 568 */ 569 static bool __of_device_is_fail(const struct device_node *device) 570 { 571 static const char * const fail[] = {"fail", "fail-", NULL}; 572 573 return __of_device_is_status(device, fail); 574 } 575 576 /** 577 * of_device_is_big_endian - check if a device has BE registers 578 * 579 * @device: Node to check for endianness 580 * 581 * Return: True if the device has a "big-endian" property, or if the kernel 582 * was compiled for BE *and* the device has a "native-endian" property. 583 * Returns false otherwise. 584 * 585 * Callers would nominally use ioread32be/iowrite32be if 586 * of_device_is_big_endian() == true, or readl/writel otherwise. 587 */ 588 bool of_device_is_big_endian(const struct device_node *device) 589 { 590 if (of_property_read_bool(device, "big-endian")) 591 return true; 592 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) && 593 of_property_read_bool(device, "native-endian")) 594 return true; 595 return false; 596 } 597 EXPORT_SYMBOL(of_device_is_big_endian); 598 599 /** 600 * of_get_parent - Get a node's parent if any 601 * @node: Node to get parent 602 * 603 * Return: A node pointer with refcount incremented, use 604 * of_node_put() on it when done. 
605 */ 606 struct device_node *of_get_parent(const struct device_node *node) 607 { 608 struct device_node *np; 609 unsigned long flags; 610 611 if (!node) 612 return NULL; 613 614 raw_spin_lock_irqsave(&devtree_lock, flags); 615 np = of_node_get(node->parent); 616 raw_spin_unlock_irqrestore(&devtree_lock, flags); 617 return np; 618 } 619 EXPORT_SYMBOL(of_get_parent); 620 621 /** 622 * of_get_next_parent - Iterate to a node's parent 623 * @node: Node to get parent of 624 * 625 * This is like of_get_parent() except that it drops the 626 * refcount on the passed node, making it suitable for iterating 627 * through a node's parents. 628 * 629 * Return: A node pointer with refcount incremented, use 630 * of_node_put() on it when done. 631 */ 632 struct device_node *of_get_next_parent(struct device_node *node) 633 { 634 struct device_node *parent; 635 unsigned long flags; 636 637 if (!node) 638 return NULL; 639 640 raw_spin_lock_irqsave(&devtree_lock, flags); 641 parent = of_node_get(node->parent); 642 of_node_put(node); 643 raw_spin_unlock_irqrestore(&devtree_lock, flags); 644 return parent; 645 } 646 EXPORT_SYMBOL(of_get_next_parent); 647 648 static struct device_node *__of_get_next_child(const struct device_node *node, 649 struct device_node *prev) 650 { 651 struct device_node *next; 652 653 if (!node) 654 return NULL; 655 656 next = prev ? prev->sibling : node->child; 657 of_node_get(next); 658 of_node_put(prev); 659 return next; 660 } 661 #define __for_each_child_of_node(parent, child) \ 662 for (child = __of_get_next_child(parent, NULL); child != NULL; \ 663 child = __of_get_next_child(parent, child)) 664 665 /** 666 * of_get_next_child - Iterate a node childs 667 * @node: parent node 668 * @prev: previous child of the parent node, or NULL to get first 669 * 670 * Return: A node pointer with refcount incremented, use of_node_put() on 671 * it when done. Returns NULL when prev is the last child. Decrements the 672 * refcount of prev. 
673 */ 674 struct device_node *of_get_next_child(const struct device_node *node, 675 struct device_node *prev) 676 { 677 struct device_node *next; 678 unsigned long flags; 679 680 raw_spin_lock_irqsave(&devtree_lock, flags); 681 next = __of_get_next_child(node, prev); 682 raw_spin_unlock_irqrestore(&devtree_lock, flags); 683 return next; 684 } 685 EXPORT_SYMBOL(of_get_next_child); 686 687 /** 688 * of_get_next_child_with_prefix - Find the next child node with prefix 689 * @node: parent node 690 * @prev: previous child of the parent node, or NULL to get first 691 * @prefix: prefix that the node name should have 692 * 693 * This function is like of_get_next_child(), except that it automatically 694 * skips any nodes whose name doesn't have the given prefix. 695 * 696 * Return: A node pointer with refcount incremented, use 697 * of_node_put() on it when done. 698 */ 699 struct device_node *of_get_next_child_with_prefix(const struct device_node *node, 700 struct device_node *prev, 701 const char *prefix) 702 { 703 struct device_node *next; 704 unsigned long flags; 705 706 if (!node) 707 return NULL; 708 709 raw_spin_lock_irqsave(&devtree_lock, flags); 710 next = prev ? prev->sibling : node->child; 711 for (; next; next = next->sibling) { 712 if (!of_node_name_prefix(next, prefix)) 713 continue; 714 if (of_node_get(next)) 715 break; 716 } 717 of_node_put(prev); 718 raw_spin_unlock_irqrestore(&devtree_lock, flags); 719 return next; 720 } 721 EXPORT_SYMBOL(of_get_next_child_with_prefix); 722 723 static struct device_node *of_get_next_status_child(const struct device_node *node, 724 struct device_node *prev, 725 bool (*checker)(const struct device_node *)) 726 { 727 struct device_node *next; 728 unsigned long flags; 729 730 if (!node) 731 return NULL; 732 733 raw_spin_lock_irqsave(&devtree_lock, flags); 734 next = prev ? 
prev->sibling : node->child; 735 for (; next; next = next->sibling) { 736 if (!checker(next)) 737 continue; 738 if (of_node_get(next)) 739 break; 740 } 741 of_node_put(prev); 742 raw_spin_unlock_irqrestore(&devtree_lock, flags); 743 return next; 744 } 745 746 /** 747 * of_get_next_available_child - Find the next available child node 748 * @node: parent node 749 * @prev: previous child of the parent node, or NULL to get first 750 * 751 * This function is like of_get_next_child(), except that it 752 * automatically skips any disabled nodes (i.e. status = "disabled"). 753 */ 754 struct device_node *of_get_next_available_child(const struct device_node *node, 755 struct device_node *prev) 756 { 757 return of_get_next_status_child(node, prev, __of_device_is_available); 758 } 759 EXPORT_SYMBOL(of_get_next_available_child); 760 761 /** 762 * of_get_next_reserved_child - Find the next reserved child node 763 * @node: parent node 764 * @prev: previous child of the parent node, or NULL to get first 765 * 766 * This function is like of_get_next_child(), except that it 767 * automatically skips any disabled nodes (i.e. status = "disabled"). 768 */ 769 struct device_node *of_get_next_reserved_child(const struct device_node *node, 770 struct device_node *prev) 771 { 772 return of_get_next_status_child(node, prev, __of_device_is_reserved); 773 } 774 EXPORT_SYMBOL(of_get_next_reserved_child); 775 776 /** 777 * of_get_next_cpu_node - Iterate on cpu nodes 778 * @prev: previous child of the /cpus node, or NULL to get first 779 * 780 * Unusable CPUs (those with the status property set to "fail" or "fail-...") 781 * will be skipped. 782 * 783 * Return: A cpu node pointer with refcount incremented, use of_node_put() 784 * on it when done. Returns NULL when prev is the last child. Decrements 785 * the refcount of prev. 
 */
struct device_node *of_get_next_cpu_node(struct device_node *prev)
{
	struct device_node *next = NULL;
	unsigned long flags;
	struct device_node *node;

	/* 'node' is only read below on the !prev path, where it is set */
	if (!prev)
		node = of_find_node_by_path("/cpus");

	raw_spin_lock_irqsave(&devtree_lock, flags);
	if (prev)
		next = prev->sibling;
	else if (node) {
		next = node->child;
		of_node_put(node);
	}
	for (; next; next = next->sibling) {
		/* Skip CPUs marked unusable via status = "fail"/"fail-..." */
		if (__of_device_is_fail(next))
			continue;
		/* Accept nodes named "cpu" or with device_type = "cpu" */
		if (!(of_node_name_eq(next, "cpu") ||
		      __of_node_is_type(next, "cpu")))
			continue;
		if (of_node_get(next))
			break;
	}
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_cpu_node);

/**
 * of_get_compatible_child - Find compatible child node
 * @parent:	parent node
 * @compatible:	compatible string
 *
 * Lookup child node whose compatible property contains the given compatible
 * string.
 *
 * Return: a node pointer with refcount incremented, use of_node_put() on it
 * when done; or NULL if not found.
 */
struct device_node *of_get_compatible_child(const struct device_node *parent,
				const char *compatible)
{
	struct device_node *child;

	for_each_child_of_node(parent, child) {
		if (of_device_is_compatible(child, compatible))
			break;
	}

	return child;
}
EXPORT_SYMBOL(of_get_compatible_child);

/**
 * of_get_child_by_name - Find the child node by name for a given parent
 * @node:	parent node
 * @name:	child name to look for.
 *
 * This function looks for child node for given matching name
 *
 * Return: A node pointer if found, with refcount incremented, use
 * of_node_put() on it when done.
 * Returns NULL if node is not found.
 */
struct device_node *of_get_child_by_name(const struct device_node *node,
				const char *name)
{
	struct device_node *child;

	for_each_child_of_node(node, child)
		if (of_node_name_eq(child, name))
			break;
	return child;
}
EXPORT_SYMBOL(of_get_child_by_name);

/**
 * of_get_available_child_by_name - Find the available child node by name for a given parent
 * @node:	parent node
 * @name:	child name to look for.
 *
 * This function looks for child node for given matching name and checks the
 * device's availability for use.
 *
 * Return: A node pointer if found, with refcount incremented, use
 * of_node_put() on it when done.
 * Returns NULL if node is not found.
 */
struct device_node *of_get_available_child_by_name(const struct device_node *node,
						   const char *name)
{
	struct device_node *child;

	child = of_get_child_by_name(node, name);
	/* Drop the reference again when the child exists but is disabled */
	if (child && !of_device_is_available(child)) {
		of_node_put(child);
		return NULL;
	}

	return child;
}
EXPORT_SYMBOL(of_get_available_child_by_name);

/*
 * Find the child of @parent matching the first component of @path (the
 * component ends at the next '/' or at ':', which starts the options
 * string). The returned child carries the reference taken by
 * __for_each_child_of_node().
 * NOTE(review): callers reach this via __of_find_node_by_full_path() with
 * devtree_lock held — confirm before adding new call sites.
 */
struct device_node *__of_find_node_by_path(const struct device_node *parent,
						const char *path)
{
	struct device_node *child;
	int len;

	len = strcspn(path, "/:");
	if (!len)
		return NULL;

	__for_each_child_of_node(parent, child) {
		const char *name = kbasename(child->full_name);
		if (strncmp(path, name, len) == 0 && (strlen(name) == len))
			return child;
	}
	return NULL;
}

/*
 * Walk a full '/'-separated @path (optionally ending in ":options") down
 * from @node. Consumes the caller's reference on @node and returns the
 * final node, or NULL when a component does not match.
 */
struct device_node *__of_find_node_by_full_path(struct device_node *node,
						const char *path)
{
	const char *separator = strchr(path, ':');

	while (node && *path == '/') {
		struct device_node *tmp = node;

		path++; /* Increment past '/' delimiter */
		node = __of_find_node_by_path(node, path);
		of_node_put(tmp);
		path = strchrnul(path, '/');
		/* Stop descending once the ':' options separator is reached */
		if (separator && separator < path)
			break;
	}
	return node;
}

/**
 * of_find_node_opts_by_path - Find a node matching a full OF path
 * @path: Either the full path to match, or if the path does not
 *       start with '/', the name of a property of the /aliases
 *       node (an alias). In the case of an alias, the node
 *       matching the alias' value will be returned.
 * @opts: Address of a pointer into which to store the start of
 *       an options string appended to the end of the path with
 *       a ':' separator.
 *
 * Valid paths:
 *  * /foo/bar	Full path
 *  * foo	Valid alias
 *  * foo/bar	Valid alias + relative path
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_opts_by_path(const char *path, const char **opts)
{
	struct device_node *np = NULL;
	const struct property *pp;
	unsigned long flags;
	const char *separator = strchr(path, ':');

	if (opts)
		*opts = separator ? separator + 1 : NULL;

	if (strcmp(path, "/") == 0)
		return of_node_get(of_root);

	/* The path could begin with an alias */
	if (*path != '/') {
		int len;
		const char *p = strchrnul(path, '/');

		/* The alias name ends at the first '/' or ':' */
		if (separator && separator < p)
			p = separator;
		len = p - path;

		/* of_aliases must not be NULL */
		if (!of_aliases)
			return NULL;

		for_each_property_of_node(of_aliases, pp) {
			if (strlen(pp->name) == len && !strncmp(pp->name, path, len)) {
				np = of_find_node_by_path(pp->value);
				break;
			}
		}
		if (!np)
			return NULL;
		path = p;
	}

	/* Step down the tree matching path components */
	raw_spin_lock_irqsave(&devtree_lock, flags);
	if (!np)
		np = of_node_get(of_root);
	np = __of_find_node_by_full_path(np, path);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_opts_by_path);

/**
 * of_find_node_by_name - Find a node by its 
"name" property 996 * @from: The node to start searching from or NULL; the node 997 * you pass will not be searched, only the next one 998 * will. Typically, you pass what the previous call 999 * returned. of_node_put() will be called on @from. 1000 * @name: The name string to match against 1001 * 1002 * Return: A node pointer with refcount incremented, use 1003 * of_node_put() on it when done. 1004 */ 1005 struct device_node *of_find_node_by_name(struct device_node *from, 1006 const char *name) 1007 { 1008 struct device_node *np; 1009 unsigned long flags; 1010 1011 raw_spin_lock_irqsave(&devtree_lock, flags); 1012 for_each_of_allnodes_from(from, np) 1013 if (of_node_name_eq(np, name) && of_node_get(np)) 1014 break; 1015 of_node_put(from); 1016 raw_spin_unlock_irqrestore(&devtree_lock, flags); 1017 return np; 1018 } 1019 EXPORT_SYMBOL(of_find_node_by_name); 1020 1021 /** 1022 * of_find_node_by_type - Find a node by its "device_type" property 1023 * @from: The node to start searching from, or NULL to start searching 1024 * the entire device tree. The node you pass will not be 1025 * searched, only the next one will; typically, you pass 1026 * what the previous call returned. of_node_put() will be 1027 * called on from for you. 1028 * @type: The type string to match against 1029 * 1030 * Return: A node pointer with refcount incremented, use 1031 * of_node_put() on it when done. 
1032 */ 1033 struct device_node *of_find_node_by_type(struct device_node *from, 1034 const char *type) 1035 { 1036 struct device_node *np; 1037 unsigned long flags; 1038 1039 raw_spin_lock_irqsave(&devtree_lock, flags); 1040 for_each_of_allnodes_from(from, np) 1041 if (__of_node_is_type(np, type) && of_node_get(np)) 1042 break; 1043 of_node_put(from); 1044 raw_spin_unlock_irqrestore(&devtree_lock, flags); 1045 return np; 1046 } 1047 EXPORT_SYMBOL(of_find_node_by_type); 1048 1049 /** 1050 * of_find_compatible_node - Find a node based on type and one of the 1051 * tokens in its "compatible" property 1052 * @from: The node to start searching from or NULL, the node 1053 * you pass will not be searched, only the next one 1054 * will; typically, you pass what the previous call 1055 * returned. of_node_put() will be called on it 1056 * @type: The type string to match "device_type" or NULL to ignore 1057 * @compatible: The string to match to one of the tokens in the device 1058 * "compatible" list. 1059 * 1060 * Return: A node pointer with refcount incremented, use 1061 * of_node_put() on it when done. 1062 */ 1063 struct device_node *of_find_compatible_node(struct device_node *from, 1064 const char *type, const char *compatible) 1065 { 1066 struct device_node *np; 1067 unsigned long flags; 1068 1069 raw_spin_lock_irqsave(&devtree_lock, flags); 1070 for_each_of_allnodes_from(from, np) 1071 if (__of_device_is_compatible(np, compatible, type, NULL) && 1072 of_node_get(np)) 1073 break; 1074 of_node_put(from); 1075 raw_spin_unlock_irqrestore(&devtree_lock, flags); 1076 return np; 1077 } 1078 EXPORT_SYMBOL(of_find_compatible_node); 1079 1080 /** 1081 * of_find_node_with_property - Find a node which has a property with 1082 * the given name. 1083 * @from: The node to start searching from or NULL, the node 1084 * you pass will not be searched, only the next one 1085 * will; typically, you pass what the previous call 1086 * returned. 
of_node_put() will be called on it 1087 * @prop_name: The name of the property to look for. 1088 * 1089 * Return: A node pointer with refcount incremented, use 1090 * of_node_put() on it when done. 1091 */ 1092 struct device_node *of_find_node_with_property(struct device_node *from, 1093 const char *prop_name) 1094 { 1095 struct device_node *np; 1096 unsigned long flags; 1097 1098 raw_spin_lock_irqsave(&devtree_lock, flags); 1099 for_each_of_allnodes_from(from, np) { 1100 if (__of_find_property(np, prop_name, NULL)) { 1101 of_node_get(np); 1102 break; 1103 } 1104 } 1105 of_node_put(from); 1106 raw_spin_unlock_irqrestore(&devtree_lock, flags); 1107 return np; 1108 } 1109 EXPORT_SYMBOL(of_find_node_with_property); 1110 1111 static 1112 const struct of_device_id *__of_match_node(const struct of_device_id *matches, 1113 const struct device_node *node) 1114 { 1115 const struct of_device_id *best_match = NULL; 1116 int score, best_score = 0; 1117 1118 if (!matches) 1119 return NULL; 1120 1121 for (; matches->name[0] || matches->type[0] || matches->compatible[0]; matches++) { 1122 score = __of_device_is_compatible(node, matches->compatible, 1123 matches->type, matches->name); 1124 if (score > best_score) { 1125 best_match = matches; 1126 best_score = score; 1127 } 1128 } 1129 1130 return best_match; 1131 } 1132 1133 /** 1134 * of_match_node - Tell if a device_node has a matching of_match structure 1135 * @matches: array of of device match structures to search in 1136 * @node: the of device structure to match against 1137 * 1138 * Low level utility function used by device matching. 
 */
const struct of_device_id *of_match_node(const struct of_device_id *matches,
					 const struct device_node *node)
{
	const struct of_device_id *match;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	match = __of_match_node(matches, node);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return match;
}
EXPORT_SYMBOL(of_match_node);

/**
 * of_find_matching_node_and_match - Find a node based on an of_device_id
 *				     match table.
 * @from:	The node to start searching from or NULL, the node
 *		you pass will not be searched, only the next one
 *		will; typically, you pass what the previous call
 *		returned. of_node_put() will be called on it
 * @matches:	array of of device match structures to search in
 * @match:	Updated to point at the matches entry which matched
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_matching_node_and_match(struct device_node *from,
					const struct of_device_id *matches,
					const struct of_device_id **match)
{
	struct device_node *np;
	const struct of_device_id *m;
	unsigned long flags;

	if (match)
		*match = NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np) {
		m = __of_match_node(matches, np);
		if (m && of_node_get(np)) {
			if (match)
				*match = m;
			break;
		}
	}
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_matching_node_and_match);

/**
 * of_alias_from_compatible - Lookup appropriate alias for a device node
 *			      depending on compatible
 * @node:	pointer to a device tree node
 * @alias:	Pointer to buffer that alias value will be copied into
 * @len:	Length of alias value
 *
 * Based on the value of the compatible property, this routine will attempt
 * to choose an appropriate alias value for a particular device tree node.
 * It does this by stripping the manufacturer prefix (as delimited by a ',')
 * from the first entry in the compatible list property.
 *
 * Note: The matching on just the "product" side of the compatible is a relic
 * from I2C and SPI. Please do not add any new user.
 *
 * Return: This routine returns 0 on success, <0 on failure.
 */
int of_alias_from_compatible(const struct device_node *node, char *alias, int len)
{
	const char *compatible, *p;
	int cplen;

	compatible = of_get_property(node, "compatible", &cplen);
	/* reject missing property or one not NUL-terminated within its length */
	if (!compatible || strlen(compatible) > cplen)
		return -ENODEV;
	p = strchr(compatible, ',');
	strscpy(alias, p ? p + 1 : compatible, len);
	return 0;
}
EXPORT_SYMBOL_GPL(of_alias_from_compatible);

/**
 * of_find_node_by_phandle - Find a node given a phandle
 * @handle:	phandle of the node to find
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_by_phandle(phandle handle)
{
	struct device_node *np = NULL;
	unsigned long flags;
	u32 handle_hash;

	if (!handle)
		return NULL;

	handle_hash = of_phandle_cache_hash(handle);

	raw_spin_lock_irqsave(&devtree_lock, flags);

	/* fast path: the hashed cache slot may already hold this phandle */
	if (phandle_cache[handle_hash] &&
	    handle == phandle_cache[handle_hash]->phandle)
		np = phandle_cache[handle_hash];

	if (!np) {
		/* slow path: linear scan, then populate the cache slot */
		for_each_of_allnodes(np)
			if (np->phandle == handle &&
			    !of_node_check_flag(np, OF_DETACHED)) {
				phandle_cache[handle_hash] = np;
				break;
			}
	}

	of_node_get(np);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);

/**
 * of_print_phandle_args - Print a phandle specifier and its argument cells
 * @msg:	prefix string printed before the node name
 * @args:	parsed phandle specifier to print
 */
void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
{
	int i;
	printk("%s %pOF", msg, args->np);
	for (i = 0; i < args->args_count; i++) {
		/* ':' separates node from args, ',' separates the args */
		const char delim = i ? ',' : ':';

		pr_cont("%c%08x", delim, args->args[i]);
	}
	pr_cont("\n");
}

/**
 * of_phandle_iterator_init - Prepare an iterator over a phandle list property
 * @it:		iterator to initialize
 * @np:		node holding the phandle list property
 * @list_name:	property name that contains the list
 * @cells_name:	property name (in the target node) giving the argument count,
 *		or NULL to use @cell_count unconditionally
 * @cell_count:	fixed argument count, or -1 when @cells_name must be used
 *
 * Return: 0 on success, -EINVAL for bad arguments, -ENOENT if the
 * property does not exist.
 */
int of_phandle_iterator_init(struct of_phandle_iterator *it,
			     const struct device_node *np,
			     const char *list_name,
			     const char *cells_name,
			     int cell_count)
{
	const __be32 *list;
	int size;

	memset(it, 0, sizeof(*it));

	/*
	 * one of cell_count or cells_name must be provided to determine the
	 * argument length.
	 */
	if (cell_count < 0 && !cells_name)
		return -EINVAL;

	list = of_get_property(np, list_name, &size);
	if (!list)
		return -ENOENT;

	it->cells_name = cells_name;
	it->cell_count = cell_count;
	it->parent = np;
	it->list_end = list + size / sizeof(*list);
	it->phandle_end = list;
	it->cur = list;

	return 0;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_init);

/**
 * of_phandle_iterator_next - Advance the iterator to the next phandle entry
 * @it:		iterator previously set up by of_phandle_iterator_init()
 *
 * Drops the reference on the previous entry's node and takes one on the
 * next (it->node), which the final failing call releases again.
 *
 * Return: 0 on success, -ENOENT at the end of the list, -EINVAL on a
 * malformed entry.
 */
int of_phandle_iterator_next(struct of_phandle_iterator *it)
{
	uint32_t count = 0;

	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	if (!it->cur || it->phandle_end >= it->list_end)
		return -ENOENT;

	it->cur = it->phandle_end;

	/* If phandle is 0, then it is an empty entry with no arguments. */
	it->phandle = be32_to_cpup(it->cur++);

	if (it->phandle) {

		/*
		 * Find the provider node and parse the #*-cells property to
		 * determine the argument length.
		 */
		it->node = of_find_node_by_phandle(it->phandle);

		if (it->cells_name) {
			if (!it->node) {
				pr_err("%pOF: could not find phandle %d\n",
				       it->parent, it->phandle);
				goto err;
			}

			if (of_property_read_u32(it->node, it->cells_name,
						 &count)) {
				/*
				 * If both cell_count and cells_name is given,
				 * fall back to cell_count in absence
				 * of the cells_name property
				 */
				if (it->cell_count >= 0) {
					count = it->cell_count;
				} else {
					pr_err("%pOF: could not get %s for %pOF\n",
					       it->parent,
					       it->cells_name,
					       it->node);
					goto err;
				}
			}
		} else {
			count = it->cell_count;
		}

		/*
		 * Make sure that the arguments actually fit in the remaining
		 * property data length
		 */
		if (it->cur + count > it->list_end) {
			if (it->cells_name)
				pr_err("%pOF: %s = %d found %td\n",
				       it->parent, it->cells_name,
				       count, it->list_end - it->cur);
			else
				pr_err("%pOF: phandle %s needs %d, found %td\n",
				       it->parent, of_node_full_name(it->node),
				       count, it->list_end - it->cur);
			goto err;
		}
	}

	it->phandle_end = it->cur + count;
	it->cur_count = count;

	return 0;

err:
	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_next);

/**
 * of_phandle_iterator_args - Copy the current entry's argument cells
 * @it:		iterator positioned on a valid entry
 * @args:	destination array for the argument values
 * @size:	capacity of @args; copying is clamped (with a WARN) if smaller
 *		than the entry's argument count
 *
 * Return: the number of arguments copied.
 */
int of_phandle_iterator_args(struct of_phandle_iterator *it,
			     uint32_t *args,
			     int size)
{
	int i, count;

	count = it->cur_count;

	if (WARN_ON(size < count))
		count = size;

	for (i = 0; i < count; i++)
		args[i] = be32_to_cpup(it->cur++);

	return count;
}

/**
 * __of_parse_phandle_with_args - Parse one entry of a phandle list property
 * @np:		node holding the phandle list property
 * @list_name:	property name that contains the list
 * @cells_name:	property name giving the argument count, or NULL
 * @cell_count:	fixed argument count, or -1 when @cells_name must be used
 * @index:	index of the entry to parse
 * @out_args:	optional output; when non-NULL, receives the target node
 *		(with a reference held) and the argument values
 *
 * Return: 0 on success, -ENOENT for an empty entry or out-of-range index,
 * -EINVAL on parse errors.
 */
int __of_parse_phandle_with_args(const struct device_node *np,
				 const char *list_name,
				 const char *cells_name,
				 int cell_count, int index,
				 struct of_phandle_args *out_args)
{
	struct of_phandle_iterator it;
	int rc, cur_index = 0;

	if (index < 0)
		return -EINVAL;

	/* Loop over the phandles until the requested entry is found */
	of_for_each_phandle(&it, rc, np, list_name, cells_name, cell_count) {
		/*
		 * All of the error cases bail out of the loop, so at
		 * this point, the parsing is successful. If the requested
		 * index matches, then fill the out_args structure and return,
		 * or return -ENOENT for an empty entry.
		 */
		rc = -ENOENT;
		if (cur_index == index) {
			if (!it.phandle)
				goto err;

			if (out_args) {
				int c;

				c = of_phandle_iterator_args(&it,
							     out_args->args,
							     MAX_PHANDLE_ARGS);
				/* transfer it.node's reference to the caller */
				out_args->np = it.node;
				out_args->args_count = c;
			} else {
				of_node_put(it.node);
			}

			/* Found it! return success */
			return 0;
		}

		cur_index++;
	}

	/*
	 * Unlock node before returning result; will be one of:
	 * -ENOENT : index is for empty phandle
	 * -EINVAL : parsing error on data
	 */

 err:
	of_node_put(it.node);
	return rc;
}
EXPORT_SYMBOL(__of_parse_phandle_with_args);

/**
 * of_parse_phandle_with_args_map() - Find a node pointed by phandle in a list and remap it
 * @np:		pointer to a device tree node containing a list
 * @list_name:	property name that contains a list
 * @stem_name:	stem of property names that specify phandles' arguments count
 * @index:	index of a phandle to parse out
 * @out_args:	optional pointer to output arguments structure (will be filled)
 *
 * This function is useful to parse lists of phandles and their arguments.
 * Returns 0 on success and fills out_args, on error returns appropriate errno
 * value. The difference between this function and of_parse_phandle_with_args()
 * is that this API remaps a phandle if the node the phandle points to has
 * a <@stem_name>-map property.
 *
 * Caller is responsible to call of_node_put() on the returned out_args->np
 * pointer.
 *
 * Example::
 *
 *  phandle1: node1 {
 *	#list-cells = <2>;
 *  };
 *
 *  phandle2: node2 {
 *	#list-cells = <1>;
 *  };
 *
 *  phandle3: node3 {
 *	#list-cells = <1>;
 *	list-map = <0 &phandle2 3>,
 *		   <1 &phandle2 2>,
 *		   <2 &phandle1 5 1>;
 *	list-map-mask = <0x3>;
 *  };
 *
 *  node4 {
 *	list = <&phandle1 1 2 &phandle3 0>;
 *  };
 *
 * To get a device_node of the ``node2`` node you may call this:
 * of_parse_phandle_with_args(node4, "list", "list", 1, &args);
 */
int of_parse_phandle_with_args_map(const struct device_node *np,
				   const char *list_name,
				   const char *stem_name,
				   int index, struct of_phandle_args *out_args)
{
	/* derived property names; freed automatically on return */
	char *cells_name __free(kfree) = kasprintf(GFP_KERNEL, "#%s-cells", stem_name);
	char *map_name __free(kfree) = kasprintf(GFP_KERNEL, "%s-map", stem_name);
	char *mask_name __free(kfree) = kasprintf(GFP_KERNEL, "%s-map-mask", stem_name);
	char *pass_name __free(kfree) = kasprintf(GFP_KERNEL, "%s-map-pass-thru", stem_name);
	struct device_node *cur, *new = NULL;
	const __be32 *map, *mask, *pass;
	/* defaults: compare all bits, pass none through */
	static const __be32 dummy_mask[] = { [0 ... (MAX_PHANDLE_ARGS - 1)] = cpu_to_be32(~0) };
	static const __be32 dummy_pass[] = { [0 ... (MAX_PHANDLE_ARGS - 1)] = cpu_to_be32(0) };
	__be32 initial_match_array[MAX_PHANDLE_ARGS];
	const __be32 *match_array = initial_match_array;
	int i, ret, map_len, match;
	u32 list_size, new_size;

	if (index < 0)
		return -EINVAL;

	if (!cells_name || !map_name || !mask_name || !pass_name)
		return -ENOMEM;

	ret = __of_parse_phandle_with_args(np, list_name, cells_name, -1, index,
					   out_args);
	if (ret)
		return ret;

	/* Get the #<list>-cells property */
	cur = out_args->np;
	ret = of_property_read_u32(cur, cells_name, &list_size);
	if (ret < 0)
		goto put;

	/* Precalculate the match array - this simplifies match loop */
	for (i = 0; i < list_size; i++)
		initial_match_array[i] = cpu_to_be32(out_args->args[i]);

	ret = -EINVAL;
	/* follow <list>-map translations until a provider without a map */
	while (cur) {
		/* Get the <list>-map property */
		map = of_get_property(cur, map_name, &map_len);
		if (!map) {
			/* no map: out_args already holds the final target */
			return 0;
		}
		map_len /= sizeof(u32);

		/* Get the <list>-map-mask property (optional) */
		mask = of_get_property(cur, mask_name, NULL);
		if (!mask)
			mask = dummy_mask;
		/* Iterate through <list>-map property */
		match = 0;
		while (map_len > (list_size + 1) && !match) {
			/* Compare specifiers */
			match = 1;
			for (i = 0; i < list_size; i++, map_len--)
				match &= !((match_array[i] ^ *map++) & mask[i]);

			of_node_put(new);
			new = of_find_node_by_phandle(be32_to_cpup(map));
			map++;
			map_len--;

			/* Check if not found */
			if (!new) {
				ret = -EINVAL;
				goto put;
			}

			if (!of_device_is_available(new))
				match = 0;

			ret = of_property_read_u32(new, cells_name, &new_size);
			if (ret)
				goto put;

			/* Check for malformed properties */
			if (WARN_ON(new_size > MAX_PHANDLE_ARGS) ||
			    map_len < new_size) {
				ret = -EINVAL;
				goto put;
			}

			/* Move forward by new node's #<list>-cells amount */
			map += new_size;
			map_len -= new_size;
		}
		if (!match) {
			ret = -ENOENT;
			goto put;
		}

		/* Get the <list>-map-pass-thru property (optional) */
		pass = of_get_property(cur, pass_name, NULL);
		if (!pass)
			pass = dummy_pass;

		/*
		 * Successfully parsed a <list>-map translation; copy new
		 * specifier into the out_args structure, keeping the
		 * bits specified in <list>-map-pass-thru.
		 */
		for (i = 0; i < new_size; i++) {
			__be32 val = *(map - new_size + i);

			if (i < list_size) {
				val &= ~pass[i];
				val |= cpu_to_be32(out_args->args[i]) & pass[i];
			}

			initial_match_array[i] = val;
			out_args->args[i] = be32_to_cpu(val);
		}
		out_args->args_count = list_size = new_size;
		/* Iterate again with new provider */
		out_args->np = new;
		of_node_put(cur);
		cur = new;
		new = NULL;
	}
put:
	of_node_put(cur);
	of_node_put(new);
	return ret;
}
EXPORT_SYMBOL(of_parse_phandle_with_args_map);

/**
 * of_count_phandle_with_args() - Find the number of phandles references in a property
 * @np:		pointer to a device tree node containing a list
 * @list_name:	property name that contains a list
 * @cells_name:	property name that specifies phandles' arguments count
 *
 * Return: The number of phandle + argument tuples within a property. It
 * is a typical pattern to encode a list of phandle and variable
 * arguments into a single property. The number of arguments is encoded
 * by a property in the phandle-target node. For example, a gpios
 * property would contain a list of GPIO specifiers consisting of a
 * phandle and 1 or more arguments. The number of arguments are
 * determined by the #gpio-cells property in the node pointed to by the
 * phandle.
 */
int of_count_phandle_with_args(const struct device_node *np, const char *list_name,
			       const char *cells_name)
{
	struct of_phandle_iterator it;
	int rc, cur_index = 0;

	/*
	 * If cells_name is NULL we assume a cell count of 0. This makes
	 * counting the phandles trivial as each 32bit word in the list is a
	 * phandle and no arguments are to consider. So we don't iterate through
	 * the list but just use the length to determine the phandle count.
	 */
	if (!cells_name) {
		const __be32 *list;
		int size;

		list = of_get_property(np, list_name, &size);
		if (!list)
			return -ENOENT;

		return size / sizeof(*list);
	}

	rc = of_phandle_iterator_init(&it, np, list_name, cells_name, -1);
	if (rc)
		return rc;

	while ((rc = of_phandle_iterator_next(&it)) == 0)
		cur_index += 1;

	/* -ENOENT is the normal end-of-list condition */
	if (rc != -ENOENT)
		return rc;

	return cur_index;
}
EXPORT_SYMBOL(of_count_phandle_with_args);

/*
 * Unlink @prop from the singly linked list rooted at @list.  Returns @prop
 * if it was found and removed, NULL otherwise.  Callers hold devtree_lock.
 */
static struct property *__of_remove_property_from_list(struct property **list, struct property *prop)
{
	struct property **next;

	for (next = list; *next; next = &(*next)->next) {
		if (*next == prop) {
			*next = prop->next;
			prop->next = NULL;
			return prop;
		}
	}
	return NULL;
}

/**
 * __of_add_property - Add a property to a node without lock operations
 * @np:		Caller's Device Node
 * @prop:	Property to add
 *
 * Takes devtree_lock itself, but not of_mutex (the of_add_property()
 * wrapper handles that).
 */
int __of_add_property(struct device_node *np, struct property *prop)
{
	int rc = 0;
	unsigned long flags;
	struct property **next;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	/* the property may be resurrected from the deadprops list */
	__of_remove_property_from_list(&np->deadprops, prop);

	prop->next = NULL;
	next = &np->properties;
	while (*next) {
		if (of_prop_cmp(prop->name, (*next)->name) == 0) {
			/* duplicate ! don't insert it */
			rc = -EEXIST;
			goto out_unlock;
		}
		next = &(*next)->next;
	}
	*next = prop;

out_unlock:
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	if (rc)
		return rc;

	__of_add_property_sysfs(np, prop);
	return 0;
}

/**
 * of_add_property - Add a property to a node
 * @np:		Caller's Device Node
 * @prop:	Property to add
 */
int of_add_property(struct device_node *np, struct property *prop)
{
	int rc;

	mutex_lock(&of_mutex);
	rc = __of_add_property(np, prop);
	mutex_unlock(&of_mutex);

	if (!rc)
		of_property_notify(OF_RECONFIG_ADD_PROPERTY, np, prop, NULL);

	return rc;
}
EXPORT_SYMBOL_GPL(of_add_property);

int __of_remove_property(struct device_node *np, struct property *prop)
{
	unsigned long flags;
	int rc = -ENODEV;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	if (__of_remove_property_from_list(&np->properties, prop)) {
		/* Found the property, add it to deadprops list */
		prop->next = np->deadprops;
		np->deadprops = prop;
		rc = 0;
	}

	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	if (rc)
		return rc;

	__of_remove_property_sysfs(np, prop);
	return 0;
}

/**
 * of_remove_property - Remove a property from a node.
 * @np:		Caller's Device Node
 * @prop:	Property to remove
 *
 * Note that we don't actually remove it, since we have given out
 * who-knows-how-many pointers to the data using get-property.
 * Instead we just move the property to the "dead properties"
 * list, so it won't be found any more.
 */
int of_remove_property(struct device_node *np, struct property *prop)
{
	int rc;

	if (!prop)
		return -ENODEV;

	mutex_lock(&of_mutex);
	rc = __of_remove_property(np, prop);
	mutex_unlock(&of_mutex);

	if (!rc)
		of_property_notify(OF_RECONFIG_REMOVE_PROPERTY, np, prop, NULL);

	return rc;
}
EXPORT_SYMBOL_GPL(of_remove_property);

/*
 * Replace (or add) @newprop on @np; any replaced property is reported
 * through @oldpropp and parked on the deadprops list.  Takes devtree_lock;
 * callers serialize with of_mutex (see of_update_property()).
 */
int __of_update_property(struct device_node *np, struct property *newprop,
			 struct property **oldpropp)
{
	struct property **next, *oldprop;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	/* the new property may be resurrected from the deadprops list */
	__of_remove_property_from_list(&np->deadprops, newprop);

	for (next = &np->properties; *next; next = &(*next)->next) {
		if (of_prop_cmp((*next)->name, newprop->name) == 0)
			break;
	}
	*oldpropp = oldprop = *next;

	if (oldprop) {
		/* replace the node */
		newprop->next = oldprop->next;
		*next = newprop;
		oldprop->next = np->deadprops;
		np->deadprops = oldprop;
	} else {
		/* new node */
		newprop->next = NULL;
		*next = newprop;
	}

	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	__of_update_property_sysfs(np, newprop, oldprop);

	return 0;
}

/*
 * of_update_property - Update a property in a node, if the property does
 * not exist, add it.
 *
 * Note that we don't actually remove it, since we have given out
 * who-knows-how-many pointers to the data using get-property.
 * Instead we just move the property to the "dead properties" list,
 * and add the new property to the property list
 */
int of_update_property(struct device_node *np, struct property *newprop)
{
	struct property *oldprop;
	int rc;

	if (!newprop->name)
		return -EINVAL;

	mutex_lock(&of_mutex);
	rc = __of_update_property(np, newprop, &oldprop);
	mutex_unlock(&of_mutex);

	if (!rc)
		of_property_notify(OF_RECONFIG_UPDATE_PROPERTY, np, newprop, oldprop);

	return rc;
}

/* Record one parsed alias (stem + id -> node) in the global lookup list. */
static void of_alias_add(struct alias_prop *ap, struct device_node *np,
			 int id, const char *stem, int stem_len)
{
	ap->np = np;
	ap->id = id;
	strscpy(ap->stem, stem, stem_len + 1);
	list_add_tail(&ap->link, &aliases_lookup);
	pr_debug("adding DT alias:%s: stem=%s id=%i node=%pOF\n",
		 ap->alias, ap->stem, ap->id, np);
}

/**
 * of_alias_scan - Scan all properties of the 'aliases' node
 * @dt_alloc:	An allocator that provides a virtual address to memory
 *		for storing the resulting tree
 *
 * The function scans all the properties of the 'aliases' node and populates
 * the global lookup table with the properties.
1888 */ 1889 void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)) 1890 { 1891 const struct property *pp; 1892 1893 of_aliases = of_find_node_by_path("/aliases"); 1894 of_chosen = of_find_node_by_path("/chosen"); 1895 if (of_chosen == NULL) 1896 of_chosen = of_find_node_by_path("/chosen@0"); 1897 1898 if (of_chosen) { 1899 /* linux,stdout-path and /aliases/stdout are for legacy compatibility */ 1900 const char *name = NULL; 1901 1902 if (of_property_read_string(of_chosen, "stdout-path", &name)) 1903 of_property_read_string(of_chosen, "linux,stdout-path", 1904 &name); 1905 if (IS_ENABLED(CONFIG_PPC) && !name) 1906 of_property_read_string(of_aliases, "stdout", &name); 1907 if (name) 1908 of_stdout = of_find_node_opts_by_path(name, &of_stdout_options); 1909 if (of_stdout) 1910 of_stdout->fwnode.flags |= FWNODE_FLAG_BEST_EFFORT; 1911 } 1912 1913 if (!of_aliases) 1914 return; 1915 1916 for_each_property_of_node(of_aliases, pp) { 1917 const char *start = pp->name; 1918 const char *end = start + strlen(start); 1919 struct device_node *np; 1920 struct alias_prop *ap; 1921 int id, len; 1922 1923 /* Skip those we do not want to proceed */ 1924 if (is_pseudo_property(pp->name)) 1925 continue; 1926 1927 np = of_find_node_by_path(pp->value); 1928 if (!np) 1929 continue; 1930 1931 /* walk the alias backwards to extract the id and work out 1932 * the 'stem' string */ 1933 while (isdigit(*(end-1)) && end > start) 1934 end--; 1935 len = end - start; 1936 1937 if (kstrtoint(end, 10, &id) < 0) { 1938 of_node_put(np); 1939 continue; 1940 } 1941 1942 /* Allocate an alias_prop with enough space for the stem */ 1943 ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap)); 1944 if (!ap) { 1945 of_node_put(np); 1946 continue; 1947 } 1948 memset(ap, 0, sizeof(*ap) + len + 1); 1949 ap->alias = start; 1950 of_alias_add(ap, np, id, start, len); 1951 } 1952 } 1953 1954 /** 1955 * of_alias_get_id - Get alias id for the given device_node 1956 * @np: Pointer to the given device_node 1957 * 
@stem: Alias stem of the given device_node 1958 * 1959 * The function travels the lookup table to get the alias id for the given 1960 * device_node and alias stem. 1961 * 1962 * Return: The alias id if found. 1963 */ 1964 int of_alias_get_id(const struct device_node *np, const char *stem) 1965 { 1966 struct alias_prop *app; 1967 int id = -ENODEV; 1968 1969 mutex_lock(&of_mutex); 1970 list_for_each_entry(app, &aliases_lookup, link) { 1971 if (strcmp(app->stem, stem) != 0) 1972 continue; 1973 1974 if (np == app->np) { 1975 id = app->id; 1976 break; 1977 } 1978 } 1979 mutex_unlock(&of_mutex); 1980 1981 return id; 1982 } 1983 EXPORT_SYMBOL_GPL(of_alias_get_id); 1984 1985 /** 1986 * of_alias_get_highest_id - Get highest alias id for the given stem 1987 * @stem: Alias stem to be examined 1988 * 1989 * The function travels the lookup table to get the highest alias id for the 1990 * given alias stem. It returns the alias id if found. 1991 */ 1992 int of_alias_get_highest_id(const char *stem) 1993 { 1994 struct alias_prop *app; 1995 int id = -ENODEV; 1996 1997 mutex_lock(&of_mutex); 1998 list_for_each_entry(app, &aliases_lookup, link) { 1999 if (strcmp(app->stem, stem) != 0) 2000 continue; 2001 2002 if (app->id > id) 2003 id = app->id; 2004 } 2005 mutex_unlock(&of_mutex); 2006 2007 return id; 2008 } 2009 EXPORT_SYMBOL_GPL(of_alias_get_highest_id); 2010 2011 /** 2012 * of_console_check() - Test and setup console for DT setup 2013 * @dn: Pointer to device node 2014 * @name: Name to use for preferred console without index. ex. "ttyS" 2015 * @index: Index to use for preferred console. 2016 * 2017 * Check if the given device node matches the stdout-path property in the 2018 * /chosen node. If it does then register it as the preferred console. 2019 * 2020 * Return: TRUE if console successfully setup. Otherwise return FALSE. 
 */
bool of_console_check(const struct device_node *dn, char *name, int index)
{
	if (!dn || dn != of_stdout || console_set_on_cmdline)
		return false;

	/*
	 * XXX: cast `options' to char pointer to suppress compilation
	 * warnings: printk, UART and console drivers expect char pointer.
	 */
	return !add_preferred_console(name, index, (char *)of_stdout_options);
}
EXPORT_SYMBOL_GPL(of_console_check);

/**
 * of_find_next_cache_node - Find a node's subsidiary cache
 * @np:	node of type "cpu" or "cache"
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done. Caller should hold a reference
 * to np.
 */
struct device_node *of_find_next_cache_node(const struct device_node *np)
{
	struct device_node *child, *cache_node;

	/* "l2-cache" is the older binding, "next-level-cache" the fallback */
	cache_node = of_parse_phandle(np, "l2-cache", 0);
	if (!cache_node)
		cache_node = of_parse_phandle(np, "next-level-cache", 0);

	if (cache_node)
		return cache_node;

	/* OF on pmac has nodes instead of properties named "l2-cache"
	 * beneath CPU nodes.
	 */
	if (IS_ENABLED(CONFIG_PPC_PMAC) && of_node_is_type(np, "cpu"))
		for_each_child_of_node(np, child)
			if (of_node_is_type(child, "cache"))
				return child;

	return NULL;
}

/**
 * of_find_last_cache_level - Find the level at which the last cache is
 *		present for the given logical cpu
 *
 * @cpu: cpu number(logical index) for which the last cache level is needed
 *
 * Return: The level at which the last cache is present. It is exactly
 * same as the total number of cache levels for the given logical cpu.
 */
int of_find_last_cache_level(unsigned int cpu)
{
	u32 cache_level = 0;
	struct device_node *prev = NULL, *np = of_cpu_device_node_get(cpu);

	/* follow the cache chain to its last node */
	while (np) {
		of_node_put(prev);
		prev = np;
		np = of_find_next_cache_node(np);
	}

	/* cache_level stays 0 if the last node has no "cache-level" property */
	of_property_read_u32(prev, "cache-level", &cache_level);
	of_node_put(prev);

	return cache_level;
}

/**
 * of_map_id - Translate an ID through a downstream mapping.
 * @np: root complex device node.
 * @id: device ID to map.
 * @map_name: property name of the map to use.
 * @map_mask_name: optional property name of the mask to use.
 * @target: optional pointer to a target device node.
 * @id_out: optional pointer to receive the translated ID.
 *
 * Given a device ID, look up the appropriate implementation-defined
 * platform ID and/or the target device which receives transactions on that
 * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
 * @id_out may be NULL if only the other is required. If @target points to
 * a non-NULL device node pointer, only entries targeting that node will be
 * matched; if it points to a NULL value, it will receive the device node of
 * the first matching target phandle, with a reference held.
 *
 * Return: 0 on success or a standard error code on failure.
 */
int of_map_id(const struct device_node *np, u32 id,
	      const char *map_name, const char *map_mask_name,
	      struct device_node **target, u32 *id_out)
{
	u32 map_mask, masked_id;
	int map_len;
	const __be32 *map = NULL;

	if (!np || !map_name || (!target && !id_out))
		return -EINVAL;

	map = of_get_property(np, map_name, &map_len);
	if (!map) {
		if (target)
			return -ENODEV;
		/* Otherwise, no map implies no translation */
		*id_out = id;
		return 0;
	}

	/* the map must be a non-empty multiple of 4-cell entries */
	if (!map_len || map_len % (4 * sizeof(*map))) {
		pr_err("%pOF: Error: Bad %s length: %d\n", np,
			map_name, map_len);
		return -EINVAL;
	}

	/* The default is to select all bits. */
	map_mask = 0xffffffff;

	/*
	 * Can be overridden by "{iommu,msi}-map-mask" property.
	 * If of_property_read_u32() fails, the default is used.
	 */
	if (map_mask_name)
		of_property_read_u32(np, map_mask_name, &map_mask);

	masked_id = map_mask & id;
	for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
		struct device_node *phandle_node;
		/* each entry is (id-base, target phandle, out-base, length) */
		u32 id_base = be32_to_cpup(map + 0);
		u32 phandle = be32_to_cpup(map + 1);
		u32 out_base = be32_to_cpup(map + 2);
		u32 id_len = be32_to_cpup(map + 3);

		if (id_base & ~map_mask) {
			pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores id-base (0x%x)\n",
				np, map_name, map_name,
				map_mask, id_base);
			return -EFAULT;
		}

		if (masked_id < id_base || masked_id >= id_base + id_len)
			continue;

		phandle_node = of_find_node_by_phandle(phandle);
		if (!phandle_node)
			return -ENODEV;

		if (target) {
			/* caller-specified target keeps its own reference */
			if (*target)
				of_node_put(phandle_node);
			else
				*target = phandle_node;

			if (*target != phandle_node)
				continue;
		}

		if (id_out)
			*id_out = masked_id - id_base + out_base;

		pr_debug("%pOF: %s, using mask %08x, id-base: %08x, out-base: %08x, length: %08x, id: %08x -> %08x\n",
			 np, map_name, map_mask, id_base, out_base,
			 id_len, id, masked_id - id_base + out_base);
		return 0;
	}

	pr_info("%pOF: no %s translation for id 0x%x on %pOF\n", np, map_name,
		id, target && *target ? *target : NULL);

	/* Bypasses translation */
	if (id_out)
		*id_out = id;
	return 0;
}
EXPORT_SYMBOL_GPL(of_map_id);