// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for creating, accessing and interpreting the device tree.
 *
 * Paul Mackerras	August 1996.
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
 *   {engebret|bergner}@us.ibm.com
 *
 * Adapted for sparc and sparc64 by David S. Miller davem@davemloft.net
 *
 * Reconsolidated from arch/x/kernel/prom.c by Stephen Rothwell and
 * Grant Likely.
 */

#define pr_fmt(fmt)	"OF: " fmt

#include <linux/console.h>
#include <linux/ctype.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/proc_fs.h>

#include "of_private.h"

LIST_HEAD(aliases_lookup);

struct device_node *of_root;
EXPORT_SYMBOL(of_root);
struct device_node *of_chosen;
struct device_node *of_aliases;
struct device_node *of_stdout;
static const char *of_stdout_options;

struct kset *of_kset;

/*
 * Used to protect the of_aliases, to hold off addition of nodes to sysfs.
 * This mutex must be held whenever modifications are being made to the
 * device tree. The of_{attach,detach}_node() and
 * of_{add,remove,update}_property() helpers make sure this happens.
 */
DEFINE_MUTEX(of_mutex);

/* use when traversing tree through the child, sibling,
 * or parent members of struct device_node.
 */
DEFINE_RAW_SPINLOCK(devtree_lock);

bool of_node_name_eq(const struct device_node *np, const char *name)
{
	const char *node_name;
	size_t len;

	if (!np)
		return false;

	node_name = kbasename(np->full_name);
	len = strchrnul(node_name, '@') - node_name;

	return (strlen(name) == len) && (strncmp(node_name, name, len) == 0);
}

bool of_node_name_prefix(const struct device_node *np, const char *prefix)
{
	if (!np)
		return false;

	return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0;
}

int of_n_addr_cells(struct device_node *np)
{
	u32 cells;

	do {
		if (np->parent)
			np = np->parent;
		if (!of_property_read_u32(np, "#address-cells", &cells))
			return cells;
	} while (np->parent);
	/* No #address-cells property for the root node */
	return OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
}
EXPORT_SYMBOL(of_n_addr_cells);

int of_n_size_cells(struct device_node *np)
{
	u32 cells;

	do {
		if (np->parent)
			np = np->parent;
		if (!of_property_read_u32(np, "#size-cells", &cells))
			return cells;
	} while (np->parent);
	/* No #size-cells property for the root node */
	return OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
}
EXPORT_SYMBOL(of_n_size_cells);
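
/*
 * Example (illustrative sketch, not taken from a real driver): report the
 * addressing geometry that applies to a node's children. The "bus_np"
 * pointer and the printed wording are hypothetical.
 */
static void __maybe_unused example_report_cell_counts(struct device_node *bus_np)
{
	int addr_cells = of_n_addr_cells(bus_np);
	int size_cells = of_n_size_cells(bus_np);

	/* each "reg" entry of a child is addr_cells + size_cells u32 cells */
	pr_info("%pOF: #address-cells=%d #size-cells=%d\n",
		bus_np, addr_cells, size_cells);
}
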
#ifdef CONFIG_NUMA
int __weak of_node_to_nid(struct device_node *np)
{
	return NUMA_NO_NODE;
}
#endif

static struct device_node **phandle_cache;
static u32 phandle_cache_mask;

/*
 * Assumptions behind phandle_cache implementation:
 *   - phandle property values are in a contiguous range of 1..n
 *
 * If the assumptions do not hold, then
 *   - the phandle lookup overhead reduction provided by the cache
 *     will likely be less
 */
void of_populate_phandle_cache(void)
{
	unsigned long flags;
	u32 cache_entries;
	struct device_node *np;
	u32 phandles = 0;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	kfree(phandle_cache);
	phandle_cache = NULL;

	for_each_of_allnodes(np)
		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
			phandles++;

	if (!phandles)
		goto out;

	cache_entries = roundup_pow_of_two(phandles);
	phandle_cache_mask = cache_entries - 1;

	phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
				GFP_ATOMIC);
	if (!phandle_cache)
		goto out;

	for_each_of_allnodes(np)
		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
			phandle_cache[np->phandle & phandle_cache_mask] = np;

out:
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
}

int of_free_phandle_cache(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	kfree(phandle_cache);
	phandle_cache = NULL;

	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	return 0;
}
#if !defined(CONFIG_MODULES)
late_initcall_sync(of_free_phandle_cache);
#endif

void __init of_core_init(void)
{
	struct device_node *np;

	of_populate_phandle_cache();

	/* Create the kset, and register existing nodes */
	mutex_lock(&of_mutex);
	of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
	if (!of_kset) {
		mutex_unlock(&of_mutex);
		pr_err("failed to register existing nodes\n");
		return;
	}
	for_each_of_allnodes(np)
		__of_attach_node_sysfs(np);
	mutex_unlock(&of_mutex);

	/* Symlink in /proc as required by userspace ABI */
	if (of_root)
		proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
}

static struct property *__of_find_property(const struct device_node *np,
					   const char *name, int *lenp)
{
	struct property *pp;

	if (!np)
		return NULL;

	for (pp = np->properties; pp; pp = pp->next) {
		if (of_prop_cmp(pp->name, name) == 0) {
			if (lenp)
				*lenp = pp->length;
			break;
		}
	}

	return pp;
}

struct property *of_find_property(const struct device_node *np,
				  const char *name,
				  int *lenp)
{
	struct property *pp;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	pp = __of_find_property(np, name, lenp);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	return pp;
}
EXPORT_SYMBOL(of_find_property);

struct device_node *__of_find_all_nodes(struct device_node *prev)
{
	struct device_node *np;
	if (!prev) {
		np = of_root;
	} else if (prev->child) {
		np = prev->child;
	} else {
		/* Walk back up looking for a sibling, or the end of the structure */
		np = prev;
		while (np->parent && !np->sibling)
			np = np->parent;
		np = np->sibling; /* Might be null at the end of the tree */
	}
	return np;
}

/**
 * of_find_all_nodes - Get next node in global list
 * @prev:	Previous node or NULL to start iteration
 *		of_node_put() will be called on it
 *
 * Returns a node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_all_nodes(struct device_node *prev)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	np = __of_find_all_nodes(prev);
	of_node_get(np);
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_all_nodes);

/*
 * Find a property with a given name for a given node
 * and return the value.
 */
const void *__of_get_property(const struct device_node *np,
			      const char *name, int *lenp)
{
	struct property *pp = __of_find_property(np, name, lenp);

	return pp ? pp->value : NULL;
}

/*
 * Find a property with a given name for a given node
 * and return the value.
 */
const void *of_get_property(const struct device_node *np, const char *name,
			    int *lenp)
{
	struct property *pp = of_find_property(np, name, lenp);

	return pp ? pp->value : NULL;
}
EXPORT_SYMBOL(of_get_property);
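
/*
 * Example (illustrative sketch): look up a property and use its raw value.
 * The "label" property name is only an assumption for the example; real
 * callers usually prefer the typed of_property_read_*() helpers over raw
 * of_get_property() access.
 */
static void __maybe_unused example_print_label(const struct device_node *np)
{
	int len;
	const char *label = of_get_property(np, "label", &len);

	if (label)
		pr_info("%pOF: label (%d bytes): %s\n", np, len, label);
}
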
/*
 * arch_match_cpu_phys_id - Match the given logical CPU and physical id
 *
 * @cpu: logical cpu index of a core/thread
 * @phys_id: physical identifier of a core/thread
 *
 * CPU logical to physical index mapping is architecture specific.
 * However this __weak function provides a default match of physical
 * id to logical cpu index. phys_id provided here is usually values read
 * from the device tree which must match the hardware internal registers.
 *
 * Returns true if the physical identifier and the logical cpu index
 * correspond to the same core/thread, false otherwise.
 */
bool __weak arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return (u32)phys_id == cpu;
}

/**
 * Checks if the given "prop_name" property holds the physical id of the
 * core/thread corresponding to the logical cpu 'cpu'. If 'thread' is not
 * NULL, local thread number within the core is returned in it.
 */
static bool __of_find_n_match_cpu_property(struct device_node *cpun,
			const char *prop_name, int cpu, unsigned int *thread)
{
	const __be32 *cell;
	int ac, prop_len, tid;
	u64 hwid;

	ac = of_n_addr_cells(cpun);
	cell = of_get_property(cpun, prop_name, &prop_len);
	if (!cell || !ac)
		return false;
	prop_len /= sizeof(*cell) * ac;
	for (tid = 0; tid < prop_len; tid++) {
		hwid = of_read_number(cell, ac);
		if (arch_match_cpu_phys_id(cpu, hwid)) {
			if (thread)
				*thread = tid;
			return true;
		}
		cell += ac;
	}
	return false;
}

/*
 * arch_find_n_match_cpu_physical_id - See if the given device node is
 * for the cpu corresponding to logical cpu 'cpu'. Return true if so,
 * else false. If 'thread' is non-NULL, the local thread number within the
 * core is returned in it.
 */
bool __weak arch_find_n_match_cpu_physical_id(struct device_node *cpun,
					      int cpu, unsigned int *thread)
{
	/* Check for non-standard "ibm,ppc-interrupt-server#s" property
	 * for thread ids on PowerPC. If it doesn't exist fallback to
	 * standard "reg" property.
	 */
	if (IS_ENABLED(CONFIG_PPC) &&
	    __of_find_n_match_cpu_property(cpun,
					   "ibm,ppc-interrupt-server#s",
					   cpu, thread))
		return true;

	return __of_find_n_match_cpu_property(cpun, "reg", cpu, thread);
}

/**
 * of_get_cpu_node - Get device node associated with the given logical CPU
 *
 * @cpu: CPU number(logical index) for which device node is required
 * @thread: if not NULL, local thread number within the physical core is
 *          returned
 *
 * The main purpose of this function is to retrieve the device node for the
 * given logical CPU index. It should be used to initialize the of_node in
 * cpu device. Once of_node in cpu device is populated, all the further
 * references can use that instead.
 *
 * CPU logical to physical index mapping is architecture specific and is built
 * before booting secondary cores. This function uses arch_match_cpu_phys_id
 * which can be overridden by architecture specific implementation.
 *
 * Returns a node pointer for the logical cpu with refcount incremented, use
 * of_node_put() on it when done. Returns NULL if not found.
 */
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
{
	struct device_node *cpun;

	for_each_node_by_type(cpun, "cpu") {
		if (arch_find_n_match_cpu_physical_id(cpun, cpu, thread))
			return cpun;
	}
	return NULL;
}
EXPORT_SYMBOL(of_get_cpu_node);

/**
 * of_cpu_node_to_id: Get the logical CPU number for a given device_node
 *
 * @cpu_node: Pointer to the device_node for CPU.
 *
 * Returns the logical CPU number of the given CPU device_node.
 * Returns -ENODEV if the CPU is not found.
 */
int of_cpu_node_to_id(struct device_node *cpu_node)
{
	int cpu;
	bool found = false;
	struct device_node *np;

	for_each_possible_cpu(cpu) {
		np = of_cpu_device_node_get(cpu);
		found = (cpu_node == np);
		of_node_put(np);
		if (found)
			return cpu;
	}

	return -ENODEV;
}
EXPORT_SYMBOL(of_cpu_node_to_id);
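
/*
 * Example (illustrative sketch): map the boot CPU's logical index to its
 * device tree node and back again. Purely demonstrative; error handling is
 * reduced to a NULL check.
 */
static void __maybe_unused example_cpu_node_roundtrip(void)
{
	struct device_node *cpun = of_get_cpu_node(0, NULL);

	if (!cpun)
		return;

	pr_info("cpu0 is %pOF (logical id %d)\n", cpun, of_cpu_node_to_id(cpun));
	of_node_put(cpun);
}
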
/**
 * __of_device_is_compatible() - Check if the node matches given constraints
 * @device: pointer to node
 * @compat: required compatible string, NULL or "" for any match
 * @type: required device_type value, NULL or "" for any match
 * @name: required node name, NULL or "" for any match
 *
 * Checks if the given @compat, @type and @name strings match the
 * properties of the given @device. A constraint can be skipped by
 * passing NULL or an empty string as the constraint.
 *
 * Returns 0 for no match, and a positive integer on match. The return
 * value is a relative score with larger values indicating better
 * matches. The score is weighted for the most specific compatible value
 * to get the highest score. Matching type is next, followed by matching
 * name. Practically speaking, this results in the following priority
 * order for matches:
 *
 * 1. specific compatible && type && name
 * 2. specific compatible && type
 * 3. specific compatible && name
 * 4. specific compatible
 * 5. general compatible && type && name
 * 6. general compatible && type
 * 7. general compatible && name
 * 8. general compatible
 * 9. type && name
 * 10. type
 * 11. name
 */
static int __of_device_is_compatible(const struct device_node *device,
				     const char *compat, const char *type, const char *name)
{
	struct property *prop;
	const char *cp;
	int index = 0, score = 0;

	/* Compatible match has highest priority */
	if (compat && compat[0]) {
		prop = __of_find_property(device, "compatible", NULL);
		for (cp = of_prop_next_string(prop, NULL); cp;
		     cp = of_prop_next_string(prop, cp), index++) {
			if (of_compat_cmp(cp, compat, strlen(compat)) == 0) {
				score = INT_MAX/2 - (index << 2);
				break;
			}
		}
		if (!score)
			return 0;
	}

	/* Matching type is better than matching name */
	if (type && type[0]) {
		if (!device->type || of_node_cmp(type, device->type))
			return 0;
		score += 2;
	}

	/* Matching name is a bit better than not */
	if (name && name[0]) {
		if (!device->name || of_node_cmp(name, device->name))
			return 0;
		score++;
	}

	return score;
}

/** Checks if the given "compat" string matches one of the strings in
 * the device's "compatible" property
 */
int of_device_is_compatible(const struct device_node *device,
		const char *compat)
{
	unsigned long flags;
	int res;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	res = __of_device_is_compatible(device, compat, NULL, NULL);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return res;
}
EXPORT_SYMBOL(of_device_is_compatible);

/** Checks if the device is compatible with any of the entries in
 *  a NULL-terminated array of strings. Returns the best match
 *  score or 0.
 */
int of_device_compatible_match(struct device_node *device,
			       const char *const *compat)
{
	unsigned int tmp, score = 0;

	if (!compat)
		return 0;

	while (*compat) {
		tmp = of_device_is_compatible(device, *compat);
		if (tmp > score)
			score = tmp;
		compat++;
	}

	return score;
}
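
/*
 * Example (illustrative sketch): score a node against a preference-ordered
 * set of compatible strings. The "acme,..." strings are hypothetical.
 */
static bool __maybe_unused example_is_supported(struct device_node *np)
{
	static const char * const supported[] = {
		"acme,widget-v2",
		"acme,widget",
		NULL
	};

	/* non-zero means at least one entry matched; higher is more specific */
	return of_device_compatible_match(np, supported) > 0;
}
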
/**
 * of_machine_is_compatible - Test root of device tree for a given compatible value
 * @compat: compatible string to look for in root node's compatible property.
 *
 * Returns a positive integer if the root node has the given value in its
 * compatible property.
 */
int of_machine_is_compatible(const char *compat)
{
	struct device_node *root;
	int rc = 0;

	root = of_find_node_by_path("/");
	if (root) {
		rc = of_device_is_compatible(root, compat);
		of_node_put(root);
	}
	return rc;
}
EXPORT_SYMBOL(of_machine_is_compatible);

/**
 *  __of_device_is_available - check if a device is available for use
 *
 *  @device: Node to check for availability, with locks already held
 *
 *  Returns true if the status property is absent or set to "okay" or "ok",
 *  false otherwise
 */
static bool __of_device_is_available(const struct device_node *device)
{
	const char *status;
	int statlen;

	if (!device)
		return false;

	status = __of_get_property(device, "status", &statlen);
	if (status == NULL)
		return true;

	if (statlen > 0) {
		if (!strcmp(status, "okay") || !strcmp(status, "ok"))
			return true;
	}

	return false;
}

/**
 *  of_device_is_available - check if a device is available for use
 *
 *  @device: Node to check for availability
 *
 *  Returns true if the status property is absent or set to "okay" or "ok",
 *  false otherwise
 */
bool of_device_is_available(const struct device_node *device)
{
	unsigned long flags;
	bool res;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	res = __of_device_is_available(device);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return res;

}
EXPORT_SYMBOL(of_device_is_available);

/**
 *  of_device_is_big_endian - check if a device has BE registers
 *
 *  @device: Node to check for endianness
 *
 *  Returns true if the device has a "big-endian" property, or if the kernel
 *  was compiled for BE *and* the device has a "native-endian" property.
 *  Returns false otherwise.
 *
 *  Callers would nominally use ioread32be/iowrite32be if
 *  of_device_is_big_endian() == true, or readl/writel otherwise.
 */
bool of_device_is_big_endian(const struct device_node *device)
{
	if (of_property_read_bool(device, "big-endian"))
		return true;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) &&
	    of_property_read_bool(device, "native-endian"))
		return true;
	return false;
}
EXPORT_SYMBOL(of_device_is_big_endian);
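
/*
 * Example (illustrative sketch): apply a board-specific quirk only when the
 * machine matches and the device node is enabled. "acme,devboard" is a
 * hypothetical root compatible.
 */
static bool __maybe_unused example_needs_quirk(const struct device_node *np)
{
	return of_machine_is_compatible("acme,devboard") &&
	       of_device_is_available(np);
}
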
/**
 * of_get_parent - Get a node's parent if any
 * @node:	Node to get parent
 *
 * Returns a node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_get_parent(const struct device_node *node)
{
	struct device_node *np;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	np = of_node_get(node->parent);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_get_parent);

/**
 * of_get_next_parent - Iterate to a node's parent
 * @node:	Node to get parent of
 *
 * This is like of_get_parent() except that it drops the
 * refcount on the passed node, making it suitable for iterating
 * through a node's parents.
 *
 * Returns a node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_get_next_parent(struct device_node *node)
{
	struct device_node *parent;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	parent = of_node_get(node->parent);
	of_node_put(node);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return parent;
}
EXPORT_SYMBOL(of_get_next_parent);

static struct device_node *__of_get_next_child(const struct device_node *node,
						struct device_node *prev)
{
	struct device_node *next;

	if (!node)
		return NULL;

	next = prev ? prev->sibling : node->child;
	for (; next; next = next->sibling)
		if (of_node_get(next))
			break;
	of_node_put(prev);
	return next;
}
#define __for_each_child_of_node(parent, child) \
	for (child = __of_get_next_child(parent, NULL); child != NULL; \
	     child = __of_get_next_child(parent, child))

/**
 * of_get_next_child - Iterate over a node's children
 * @node:	parent node
 * @prev:	previous child of the parent node, or NULL to get first
 *
 * Returns a node pointer with refcount incremented, use of_node_put() on
 * it when done. Returns NULL when prev is the last child. Decrements the
 * refcount of prev.
 */
struct device_node *of_get_next_child(const struct device_node *node,
	struct device_node *prev)
{
	struct device_node *next;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	next = __of_get_next_child(node, prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_child);

/**
 * of_get_next_available_child - Find the next available child node
 * @node:	parent node
 * @prev:	previous child of the parent node, or NULL to get first
 *
 * This function is like of_get_next_child(), except that it
 * automatically skips any disabled nodes (i.e. status = "disabled").
 */
struct device_node *of_get_next_available_child(const struct device_node *node,
	struct device_node *prev)
{
	struct device_node *next;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	next = prev ? prev->sibling : node->child;
	for (; next; next = next->sibling) {
		if (!__of_device_is_available(next))
			continue;
		if (of_node_get(next))
			break;
	}
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_available_child);

/**
 * of_get_compatible_child - Find compatible child node
 * @parent:	parent node
 * @compatible:	compatible string
 *
 * Lookup child node whose compatible property contains the given compatible
 * string.
 *
 * Returns a node pointer with refcount incremented, use of_node_put() on it
 * when done; or NULL if not found.
 */
struct device_node *of_get_compatible_child(const struct device_node *parent,
				const char *compatible)
{
	struct device_node *child;

	for_each_child_of_node(parent, child) {
		if (of_device_is_compatible(child, compatible))
			break;
	}

	return child;
}
EXPORT_SYMBOL(of_get_compatible_child);

/**
 * of_get_child_by_name - Find the child node by name for a given parent
 * @node:	parent node
 * @name:	child name to look for.
 *
 * This function looks up a child node of the given parent by its name.
 *
 * Returns a node pointer if found, with refcount incremented, use
 * of_node_put() on it when done.
 * Returns NULL if node is not found.
 */
struct device_node *of_get_child_by_name(const struct device_node *node,
				const char *name)
{
	struct device_node *child;

	for_each_child_of_node(node, child)
		if (child->name && (of_node_cmp(child->name, name) == 0))
			break;
	return child;
}
EXPORT_SYMBOL(of_get_child_by_name);
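
/*
 * Example (illustrative sketch): walk the enabled children of a controller
 * node and look up one well-known child by name. The "port" child name is an
 * assumption made for the example.
 */
static void __maybe_unused example_walk_children(struct device_node *parent)
{
	struct device_node *child;

	for_each_available_child_of_node(parent, child)
		pr_info("enabled child: %pOF\n", child);

	child = of_get_child_by_name(parent, "port");
	if (child) {
		pr_info("found %pOF\n", child);
		of_node_put(child);	/* drop the reference taken above */
	}
}
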
struct device_node *__of_find_node_by_path(struct device_node *parent,
						const char *path)
{
	struct device_node *child;
	int len;

	len = strcspn(path, "/:");
	if (!len)
		return NULL;

	__for_each_child_of_node(parent, child) {
		const char *name = kbasename(child->full_name);
		if (strncmp(path, name, len) == 0 && (strlen(name) == len))
			return child;
	}
	return NULL;
}

struct device_node *__of_find_node_by_full_path(struct device_node *node,
						const char *path)
{
	const char *separator = strchr(path, ':');

	while (node && *path == '/') {
		struct device_node *tmp = node;

		path++; /* Increment past '/' delimiter */
		node = __of_find_node_by_path(node, path);
		of_node_put(tmp);
		path = strchrnul(path, '/');
		if (separator && separator < path)
			break;
	}
	return node;
}

/**
 * of_find_node_opts_by_path - Find a node matching a full OF path
 * @path: Either the full path to match, or if the path does not
 *        start with '/', the name of a property of the /aliases
 *        node (an alias). In the case of an alias, the node
 *        matching the alias' value will be returned.
 * @opts: Address of a pointer into which to store the start of
 *        an options string appended to the end of the path with
 *        a ':' separator.
 *
 * Valid paths:
 *  /foo/bar	Full path
 *  foo		Valid alias
 *  foo/bar	Valid alias + relative path
 *
 * Returns a node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_opts_by_path(const char *path, const char **opts)
{
	struct device_node *np = NULL;
	struct property *pp;
	unsigned long flags;
	const char *separator = strchr(path, ':');

	if (opts)
		*opts = separator ? separator + 1 : NULL;

	if (strcmp(path, "/") == 0)
		return of_node_get(of_root);

	/* The path could begin with an alias */
	if (*path != '/') {
		int len;
		const char *p = separator;

		if (!p)
			p = strchrnul(path, '/');
		len = p - path;

		/* of_aliases must not be NULL */
		if (!of_aliases)
			return NULL;

		for_each_property_of_node(of_aliases, pp) {
			if (strlen(pp->name) == len && !strncmp(pp->name, path, len)) {
				np = of_find_node_by_path(pp->value);
				break;
			}
		}
		if (!np)
			return NULL;
		path = p;
	}

	/* Step down the tree matching path components */
	raw_spin_lock_irqsave(&devtree_lock, flags);
	if (!np)
		np = of_node_get(of_root);
	np = __of_find_node_by_full_path(np, path);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_opts_by_path);
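
/*
 * Example (illustrative sketch): resolve a stdout-path style string such as
 * "serial0:115200n8" into a node plus its options suffix. The alias name and
 * options shown are hypothetical.
 */
static void __maybe_unused example_resolve_alias_path(void)
{
	const char *opts;
	struct device_node *np = of_find_node_opts_by_path("serial0:115200n8",
							   &opts);

	if (np) {
		pr_info("serial0 is %pOF, options=%s\n", np, opts ?: "none");
		of_node_put(np);
	}
}
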
/**
 * of_find_node_by_name - Find a node by its "name" property
 * @from:	The node to start searching from or NULL; the node
 *		you pass will not be searched, only the next one
 *		will. Typically, you pass what the previous call
 *		returned. of_node_put() will be called on @from.
 * @name:	The name string to match against
 *
 * Returns a node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_by_name(struct device_node *from,
	const char *name)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (np->name && (of_node_cmp(np->name, name) == 0)
		    && of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_name);

/**
 * of_find_node_by_type - Find a node by its "device_type" property
 * @from:	The node to start searching from, or NULL to start searching
 *		the entire device tree. The node you pass will not be
 *		searched, only the next one will; typically, you pass
 *		what the previous call returned. of_node_put() will be
 *		called on from for you.
 * @type:	The type string to match against
 *
 * Returns a node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_by_type(struct device_node *from,
	const char *type)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (np->type && (of_node_cmp(np->type, type) == 0)
		    && of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_type);

/**
 * of_find_compatible_node - Find a node based on type and one of the
 *                           tokens in its "compatible" property
 * @from:	The node to start searching from or NULL, the node
 *		you pass will not be searched, only the next one
 *		will; typically, you pass what the previous call
 *		returned. of_node_put() will be called on it
 * @type:	The type string to match "device_type" or NULL to ignore
 * @compatible:	The string to match to one of the tokens in the device
 *		"compatible" list.
 *
 * Returns a node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_compatible_node(struct device_node *from,
	const char *type, const char *compatible)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (__of_device_is_compatible(np, compatible, type, NULL) &&
		    of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_compatible_node);

/**
 * of_find_node_with_property - Find a node which has a property with
 *                              the given name.
 * @from:	The node to start searching from or NULL, the node
 *		you pass will not be searched, only the next one
 *		will; typically, you pass what the previous call
 *		returned. of_node_put() will be called on it
 * @prop_name:	The name of the property to look for.
 *
 * Returns a node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_with_property(struct device_node *from,
	const char *prop_name)
{
	struct device_node *np;
	struct property *pp;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np) {
		for (pp = np->properties; pp; pp = pp->next) {
			if (of_prop_cmp(pp->name, prop_name) == 0) {
				of_node_get(np);
				goto out;
			}
		}
	}
out:
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_with_property);

static
const struct of_device_id *__of_match_node(const struct of_device_id *matches,
					   const struct device_node *node)
{
	const struct of_device_id *best_match = NULL;
	int score, best_score = 0;

	if (!matches)
		return NULL;

	for (; matches->name[0] || matches->type[0] || matches->compatible[0]; matches++) {
		score = __of_device_is_compatible(node, matches->compatible,
						  matches->type, matches->name);
		if (score > best_score) {
			best_match = matches;
			best_score = score;
		}
	}

	return best_match;
}

/**
 * of_match_node - Tell if a device_node has a matching of_match structure
 * @matches:	array of of device match structures to search in
 * @node:	the of device structure to match against
 *
 * Low level utility function used by device matching.
 */
const struct of_device_id *of_match_node(const struct of_device_id *matches,
					 const struct device_node *node)
{
	const struct of_device_id *match;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	match = __of_match_node(matches, node);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return match;
}
EXPORT_SYMBOL(of_match_node);
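
/*
 * Example (illustrative sketch): pick per-variant data from a match table.
 * The compatible strings and the integers stashed in .data are hypothetical.
 */
static const struct of_device_id example_match_table[] __maybe_unused = {
	{ .compatible = "acme,widget-v2", .data = (const void *)2 },
	{ .compatible = "acme,widget",    .data = (const void *)1 },
	{ /* sentinel */ }
};

static long __maybe_unused example_widget_variant(struct device_node *np)
{
	const struct of_device_id *match = of_match_node(example_match_table, np);

	return match ? (long)match->data : -ENODEV;
}
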
/**
 * of_find_matching_node_and_match - Find a node based on an of_device_id
 *				     match table.
 * @from:	The node to start searching from or NULL, the node
 *		you pass will not be searched, only the next one
 *		will; typically, you pass what the previous call
 *		returned. of_node_put() will be called on it
 * @matches:	array of of device match structures to search in
 * @match:	Updated to point at the matches entry which matched
 *
 * Returns a node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_matching_node_and_match(struct device_node *from,
					const struct of_device_id *matches,
					const struct of_device_id **match)
{
	struct device_node *np;
	const struct of_device_id *m;
	unsigned long flags;

	if (match)
		*match = NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np) {
		m = __of_match_node(matches, np);
		if (m && of_node_get(np)) {
			if (match)
				*match = m;
			break;
		}
	}
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_matching_node_and_match);

/**
 * of_modalias_node - Lookup appropriate modalias for a device node
 * @node:	pointer to a device tree node
 * @modalias:	Pointer to buffer that modalias value will be copied into
 * @len:	Length of modalias value
 *
 * Based on the value of the compatible property, this routine will attempt
 * to choose an appropriate modalias value for a particular device tree node.
 * It does this by stripping the manufacturer prefix (as delimited by a ',')
 * from the first entry in the compatible list property.
 *
 * This routine returns 0 on success, <0 on failure.
 */
int of_modalias_node(struct device_node *node, char *modalias, int len)
{
	const char *compatible, *p;
	int cplen;

	compatible = of_get_property(node, "compatible", &cplen);
	if (!compatible || strlen(compatible) > cplen)
		return -ENODEV;
	p = strchr(compatible, ',');
	strlcpy(modalias, p ? p + 1 : compatible, len);
	return 0;
}
EXPORT_SYMBOL_GPL(of_modalias_node);

/**
 * of_find_node_by_phandle - Find a node given a phandle
 * @handle:	phandle of the node to find
 *
 * Returns a node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_by_phandle(phandle handle)
{
	struct device_node *np = NULL;
	unsigned long flags;
	phandle masked_handle;

	if (!handle)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	masked_handle = handle & phandle_cache_mask;

	if (phandle_cache) {
		if (phandle_cache[masked_handle] &&
		    handle == phandle_cache[masked_handle]->phandle)
			np = phandle_cache[masked_handle];
	}

	if (!np) {
		for_each_of_allnodes(np)
			if (np->phandle == handle) {
				if (phandle_cache)
					phandle_cache[masked_handle] = np;
				break;
			}
	}

	of_node_get(np);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);

void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
{
	int i;
	printk("%s %pOF", msg, args->np);
	for (i = 0; i < args->args_count; i++) {
		const char delim = i ? ',' : ':';

		pr_cont("%c%08x", delim, args->args[i]);
	}
	pr_cont("\n");
}

int of_phandle_iterator_init(struct of_phandle_iterator *it,
		const struct device_node *np,
		const char *list_name,
		const char *cells_name,
		int cell_count)
{
	const __be32 *list;
	int size;

	memset(it, 0, sizeof(*it));

	list = of_get_property(np, list_name, &size);
	if (!list)
		return -ENOENT;

	it->cells_name = cells_name;
	it->cell_count = cell_count;
	it->parent = np;
	it->list_end = list + size / sizeof(*list);
	it->phandle_end = list;
	it->cur = list;

	return 0;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_init);

int of_phandle_iterator_next(struct of_phandle_iterator *it)
{
	uint32_t count = 0;

	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	if (!it->cur || it->phandle_end >= it->list_end)
		return -ENOENT;

	it->cur = it->phandle_end;

	/* If phandle is 0, then it is an empty entry with no arguments. */
	it->phandle = be32_to_cpup(it->cur++);

	if (it->phandle) {

		/*
		 * Find the provider node and parse the #*-cells property to
		 * determine the argument length.
		 */
		it->node = of_find_node_by_phandle(it->phandle);

		if (it->cells_name) {
			if (!it->node) {
				pr_err("%pOF: could not find phandle\n",
				       it->parent);
				goto err;
			}

			if (of_property_read_u32(it->node, it->cells_name,
						 &count)) {
				pr_err("%pOF: could not get %s for %pOF\n",
				       it->parent,
				       it->cells_name,
				       it->node);
				goto err;
			}
		} else {
			count = it->cell_count;
		}

		/*
		 * Make sure that the arguments actually fit in the remaining
		 * property data length
		 */
		if (it->cur + count > it->list_end) {
			pr_err("%pOF: arguments longer than property\n",
			       it->parent);
			goto err;
		}
	}

	it->phandle_end = it->cur + count;
	it->cur_count = count;

	return 0;

err:
	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_next);

int of_phandle_iterator_args(struct of_phandle_iterator *it,
			     uint32_t *args,
			     int size)
{
	int i, count;

	count = it->cur_count;

	if (WARN_ON(size < count))
		count = size;

	for (i = 0; i < count; i++)
		args[i] = be32_to_cpup(it->cur++);

	return count;
}

static int __of_parse_phandle_with_args(const struct device_node *np,
					const char *list_name,
					const char *cells_name,
					int cell_count, int index,
					struct of_phandle_args *out_args)
{
	struct of_phandle_iterator it;
	int rc, cur_index = 0;

	/* Loop over the phandles until the requested entry is found */
	of_for_each_phandle(&it, rc, np, list_name, cells_name, cell_count) {
		/*
		 * All of the error cases bail out of the loop, so at
		 * this point, the parsing is successful. If the requested
		 * index matches, then fill the out_args structure and return,
		 * or return -ENOENT for an empty entry.
		 */
		rc = -ENOENT;
		if (cur_index == index) {
			if (!it.phandle)
				goto err;

			if (out_args) {
				int c;

				c = of_phandle_iterator_args(&it,
							     out_args->args,
							     MAX_PHANDLE_ARGS);
				out_args->np = it.node;
				out_args->args_count = c;
			} else {
				of_node_put(it.node);
			}

			/* Found it! return success */
			return 0;
		}

		cur_index++;
	}

	/*
	 * Unlock node before returning result; will be one of:
	 * -ENOENT : index is for empty phandle
	 * -EINVAL : parsing error on data
	 */

 err:
	of_node_put(it.node);
	return rc;
}

/**
 * of_parse_phandle - Resolve a phandle property to a device_node pointer
 * @np: Pointer to device node holding phandle property
 * @phandle_name: Name of property holding a phandle value
 * @index: For properties holding a table of phandles, this is the index into
 *         the table
 *
 * Returns the device_node pointer with refcount incremented. Use
 * of_node_put() on it when done.
 */
struct device_node *of_parse_phandle(const struct device_node *np,
				     const char *phandle_name, int index)
{
	struct of_phandle_args args;

	if (index < 0)
		return NULL;

	if (__of_parse_phandle_with_args(np, phandle_name, NULL, 0,
					 index, &args))
		return NULL;

	return args.np;
}
EXPORT_SYMBOL(of_parse_phandle);

/**
 * of_parse_phandle_with_args() - Find a node pointed by phandle in a list
 * @np:		pointer to a device tree node containing a list
 * @list_name:	property name that contains a list
 * @cells_name:	property name that specifies phandles' arguments count
 * @index:	index of a phandle to parse out
 * @out_args:	optional pointer to output arguments structure (will be filled)
 *
 * This function is useful to parse lists of phandles and their arguments.
 * Returns 0 on success and fills out_args, on error returns appropriate
 * errno value.
 *
 * Caller is responsible to call of_node_put() on the returned out_args->np
 * pointer.
 *
 * Example:
 *
 * phandle1: node1 {
 *	#list-cells = <2>;
 * }
 *
 * phandle2: node2 {
 *	#list-cells = <1>;
 * }
 *
 * node3 {
 *	list = <&phandle1 1 2 &phandle2 3>;
 * }
 *
 * To get a device_node of the `node2' node you may call this:
 * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
 */
int of_parse_phandle_with_args(const struct device_node *np, const char *list_name,
				const char *cells_name, int index,
				struct of_phandle_args *out_args)
{
	if (index < 0)
		return -EINVAL;
	return __of_parse_phandle_with_args(np, list_name, cells_name, 0,
					    index, out_args);
}
EXPORT_SYMBOL(of_parse_phandle_with_args);
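
/*
 * Example (illustrative sketch): consume a "clocks"/"#clock-cells" style
 * phandle list from C. The property names mirror the common clock binding,
 * but the consumer node and the meaning of the arguments are hypothetical.
 */
static void __maybe_unused example_parse_clock_specifier(struct device_node *np)
{
	struct of_phandle_args args;

	if (of_parse_phandle_with_args(np, "clocks", "#clock-cells", 0, &args))
		return;

	pr_info("clock provider %pOF with %d argument cells\n",
		args.np, args.args_count);
	of_node_put(args.np);	/* caller owns the reference on args.np */
}
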
/**
 * of_parse_phandle_with_args_map() - Find a node pointed by phandle in a list and remap it
 * @np:		pointer to a device tree node containing a list
 * @list_name:	property name that contains a list
 * @stem_name:	stem of property names that specify phandles' arguments count
 * @index:	index of a phandle to parse out
 * @out_args:	optional pointer to output arguments structure (will be filled)
 *
 * This function is useful to parse lists of phandles and their arguments.
 * Returns 0 on success and fills out_args, on error returns appropriate errno
 * value. The difference between this function and of_parse_phandle_with_args()
 * is that this API remaps a phandle if the node the phandle points to has
 * a <@stem_name>-map property.
 *
 * Caller is responsible to call of_node_put() on the returned out_args->np
 * pointer.
 *
 * Example:
 *
 * phandle1: node1 {
 *	#list-cells = <2>;
 * }
 *
 * phandle2: node2 {
 *	#list-cells = <1>;
 * }
 *
 * phandle3: node3 {
 *	#list-cells = <1>;
 *	list-map = <0 &phandle2 3>,
 *		   <1 &phandle2 2>,
 *		   <2 &phandle1 5 1>;
 *	list-map-mask = <0x3>;
 * };
 *
 * node4 {
 *	list = <&phandle1 1 2 &phandle3 0>;
 * }
 *
 * To get a device_node of the `node2' node you may call this:
 * of_parse_phandle_with_args_map(node4, "list", "list", 1, &args);
 */
int of_parse_phandle_with_args_map(const struct device_node *np,
				   const char *list_name,
				   const char *stem_name,
				   int index, struct of_phandle_args *out_args)
{
	char *cells_name, *map_name = NULL, *mask_name = NULL;
	char *pass_name = NULL;
	struct device_node *cur, *new = NULL;
	const __be32 *map, *mask, *pass;
	static const __be32 dummy_mask[] = { [0 ... MAX_PHANDLE_ARGS] = ~0 };
	static const __be32 dummy_pass[] = { [0 ... MAX_PHANDLE_ARGS] = 0 };
	__be32 initial_match_array[MAX_PHANDLE_ARGS];
	const __be32 *match_array = initial_match_array;
	int i, ret, map_len, match;
	u32 list_size, new_size;

	if (index < 0)
		return -EINVAL;

	cells_name = kasprintf(GFP_KERNEL, "#%s-cells", stem_name);
	if (!cells_name)
		return -ENOMEM;

	ret = -ENOMEM;
	map_name = kasprintf(GFP_KERNEL, "%s-map", stem_name);
	if (!map_name)
		goto free;

	mask_name = kasprintf(GFP_KERNEL, "%s-map-mask", stem_name);
	if (!mask_name)
		goto free;

	pass_name = kasprintf(GFP_KERNEL, "%s-map-pass-thru", stem_name);
	if (!pass_name)
		goto free;

	ret = __of_parse_phandle_with_args(np, list_name, cells_name, 0, index,
					   out_args);
	if (ret)
		goto free;

	/* Get the #<list>-cells property */
	cur = out_args->np;
	ret = of_property_read_u32(cur, cells_name, &list_size);
	if (ret < 0)
		goto put;

	/* Precalculate the match array - this simplifies match loop */
	for (i = 0; i < list_size; i++)
		initial_match_array[i] = cpu_to_be32(out_args->args[i]);

	ret = -EINVAL;
	while (cur) {
		/* Get the <list>-map property */
		map = of_get_property(cur, map_name, &map_len);
		if (!map) {
			ret = 0;
			goto free;
		}
		map_len /= sizeof(u32);

		/* Get the <list>-map-mask property (optional) */
		mask = of_get_property(cur, mask_name, NULL);
		if (!mask)
			mask = dummy_mask;
		/* Iterate through <list>-map property */
		match = 0;
		while (map_len > (list_size + 1) && !match) {
			/* Compare specifiers */
			match = 1;
			for (i = 0; i < list_size; i++, map_len--)
				match &= !((match_array[i] ^ *map++) & mask[i]);

			of_node_put(new);
			new = of_find_node_by_phandle(be32_to_cpup(map));
			map++;
			map_len--;

			/* Check if not found */
			if (!new)
				goto put;

			if (!of_device_is_available(new))
				match = 0;

			ret = of_property_read_u32(new, cells_name, &new_size);
			if (ret)
				goto put;

			/* Check for malformed properties */
			if (WARN_ON(new_size > MAX_PHANDLE_ARGS))
				goto put;
			if (map_len < new_size)
				goto put;

			/* Move forward by new node's #<list>-cells amount */
			map += new_size;
			map_len -= new_size;
		}
		if (!match)
			goto put;

		/* Get the <list>-map-pass-thru property (optional) */
		pass = of_get_property(cur, pass_name, NULL);
		if (!pass)
			pass = dummy_pass;

		/*
		 * Successfully parsed a <list>-map translation; copy new
		 * specifier into the out_args structure, keeping the
		 * bits specified in <list>-map-pass-thru.
		 */
		match_array = map - new_size;
		for (i = 0; i < new_size; i++) {
			__be32 val = *(map - new_size + i);

			if (i < list_size) {
				val &= ~pass[i];
				val |= cpu_to_be32(out_args->args[i]) & pass[i];
			}

			out_args->args[i] = be32_to_cpu(val);
		}
		out_args->args_count = list_size = new_size;
		/* Iterate again with new provider */
		out_args->np = new;
		of_node_put(cur);
		cur = new;
	}
put:
	of_node_put(cur);
	of_node_put(new);
free:
	kfree(mask_name);
	kfree(map_name);
	kfree(cells_name);
	kfree(pass_name);

	return ret;
}
EXPORT_SYMBOL(of_parse_phandle_with_args_map);

/**
 * of_parse_phandle_with_fixed_args() - Find a node pointed by phandle in a list
 * @np:		pointer to a device tree node containing a list
 * @list_name:	property name that contains a list
 * @cell_count: number of argument cells following the phandle
 * @index:	index of a phandle to parse out
 * @out_args:	optional pointer to output arguments structure (will be filled)
 *
 * This function is useful to parse lists of phandles and their arguments.
 * Returns 0 on success and fills out_args, on error returns appropriate
 * errno value.
 *
 * Caller is responsible to call of_node_put() on the returned out_args->np
 * pointer.
 *
 * Example:
 *
 * phandle1: node1 {
 * }
 *
 * phandle2: node2 {
 * }
 *
 * node3 {
 *	list = <&phandle1 0 2 &phandle2 2 3>;
 * }
 *
 * To get a device_node of the `node2' node you may call this:
 * of_parse_phandle_with_fixed_args(node3, "list", 2, 1, &args);
 */
int of_parse_phandle_with_fixed_args(const struct device_node *np,
				const char *list_name, int cell_count,
				int index, struct of_phandle_args *out_args)
{
	if (index < 0)
		return -EINVAL;
	return __of_parse_phandle_with_args(np, list_name, NULL, cell_count,
					    index, out_args);
}
EXPORT_SYMBOL(of_parse_phandle_with_fixed_args);

/**
 * of_count_phandle_with_args() - Find the number of phandles references in a property
 * @np:		pointer to a device tree node containing a list
 * @list_name:	property name that contains a list
 * @cells_name:	property name that specifies phandles' arguments count
 *
 * Returns the number of phandle + argument tuples within a property. It
 * is a typical pattern to encode a list of phandle and variable
 * arguments into a single property. The number of arguments is encoded
 * by a property in the phandle-target node. For example, a gpios
 * property would contain a list of GPIO specifiers consisting of a
 * phandle and 1 or more arguments. The number of arguments is
 * determined by the #gpio-cells property in the node pointed to by the
 * phandle.
 */
int of_count_phandle_with_args(const struct device_node *np, const char *list_name,
				const char *cells_name)
{
	struct of_phandle_iterator it;
	int rc, cur_index = 0;

	rc = of_phandle_iterator_init(&it, np, list_name, cells_name, 0);
	if (rc)
		return rc;

	while ((rc = of_phandle_iterator_next(&it)) == 0)
		cur_index += 1;

	if (rc != -ENOENT)
		return rc;

	return cur_index;
}
EXPORT_SYMBOL(of_count_phandle_with_args);
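
/*
 * Example (illustrative sketch): count the GPIO specifiers in a "cs-gpios"
 * style property before iterating over them. The property name is only an
 * assumption made for the example.
 */
static void __maybe_unused example_count_cs_gpios(struct device_node *np)
{
	int i, n = of_count_phandle_with_args(np, "cs-gpios", "#gpio-cells");

	for (i = 0; i < n; i++) {
		struct of_phandle_args spec;

		if (of_parse_phandle_with_args(np, "cs-gpios", "#gpio-cells",
					       i, &spec))
			continue;
		pr_info("cs %d: controller %pOF, %d cells\n",
			i, spec.np, spec.args_count);
		of_node_put(spec.np);
	}
}
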
/**
 * __of_add_property - Add a property to a node without lock operations
 */
int __of_add_property(struct device_node *np, struct property *prop)
{
	struct property **next;

	prop->next = NULL;
	next = &np->properties;
	while (*next) {
		if (strcmp(prop->name, (*next)->name) == 0)
			/* duplicate ! don't insert it */
			return -EEXIST;

		next = &(*next)->next;
	}
	*next = prop;

	return 0;
}

/**
 * of_add_property - Add a property to a node
 */
int of_add_property(struct device_node *np, struct property *prop)
{
	unsigned long flags;
	int rc;

	mutex_lock(&of_mutex);

	raw_spin_lock_irqsave(&devtree_lock, flags);
	rc = __of_add_property(np, prop);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	if (!rc)
		__of_add_property_sysfs(np, prop);

	mutex_unlock(&of_mutex);

	if (!rc)
		of_property_notify(OF_RECONFIG_ADD_PROPERTY, np, prop, NULL);

	return rc;
}

int __of_remove_property(struct device_node *np, struct property *prop)
{
	struct property **next;

	for (next = &np->properties; *next; next = &(*next)->next) {
		if (*next == prop)
			break;
	}
	if (*next == NULL)
		return -ENODEV;

	/* found the node */
	*next = prop->next;
	prop->next = np->deadprops;
	np->deadprops = prop;

	return 0;
}

/**
 * of_remove_property - Remove a property from a node.
 *
 * Note that we don't actually remove it, since we have given out
 * who-knows-how-many pointers to the data using get-property.
 * Instead we just move the property to the "dead properties"
 * list, so it won't be found any more.
 */
int of_remove_property(struct device_node *np, struct property *prop)
{
	unsigned long flags;
	int rc;

	if (!prop)
		return -ENODEV;

	mutex_lock(&of_mutex);

	raw_spin_lock_irqsave(&devtree_lock, flags);
	rc = __of_remove_property(np, prop);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	if (!rc)
		__of_remove_property_sysfs(np, prop);

	mutex_unlock(&of_mutex);

	if (!rc)
		of_property_notify(OF_RECONFIG_REMOVE_PROPERTY, np, prop, NULL);

	return rc;
}

int __of_update_property(struct device_node *np, struct property *newprop,
		struct property **oldpropp)
{
	struct property **next, *oldprop;

	for (next = &np->properties; *next; next = &(*next)->next) {
		if (of_prop_cmp((*next)->name, newprop->name) == 0)
			break;
	}
	*oldpropp = oldprop = *next;

	if (oldprop) {
		/* replace the node */
		newprop->next = oldprop->next;
		*next = newprop;
		oldprop->next = np->deadprops;
		np->deadprops = oldprop;
	} else {
		/* new node */
		newprop->next = NULL;
		*next = newprop;
	}

	return 0;
}

/*
 * of_update_property - Update a property in a node, if the property does
 * not exist, add it.
 *
 * Note that we don't actually remove it, since we have given out
 * who-knows-how-many pointers to the data using get-property.
 * Instead we just move the property to the "dead properties" list,
 * and add the new property to the property list
 */
int of_update_property(struct device_node *np, struct property *newprop)
{
	struct property *oldprop;
	unsigned long flags;
	int rc;

	if (!newprop->name)
		return -EINVAL;

	mutex_lock(&of_mutex);

	raw_spin_lock_irqsave(&devtree_lock, flags);
	rc = __of_update_property(np, newprop, &oldprop);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	if (!rc)
		__of_update_property_sysfs(np, newprop, oldprop);

	mutex_unlock(&of_mutex);

	if (!rc)
		of_property_notify(OF_RECONFIG_UPDATE_PROPERTY, np, newprop, oldprop);

	return rc;
}

static void of_alias_add(struct alias_prop *ap, struct device_node *np,
			 int id, const char *stem, int stem_len)
{
	ap->np = np;
	ap->id = id;
	strncpy(ap->stem, stem, stem_len);
	ap->stem[stem_len] = 0;
	list_add_tail(&ap->link, &aliases_lookup);
	pr_debug("adding DT alias:%s: stem=%s id=%i node=%pOF\n",
		 ap->alias, ap->stem, ap->id, np);
}

/**
 * of_alias_scan - Scan all properties of the 'aliases' node
 *
 * The function scans all the properties of the 'aliases' node and populates
 * the global alias lookup table with them.
 *
 * @dt_alloc:	An allocator that provides a virtual address to memory
 *		for storing the resulting tree
 */
void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
{
	struct property *pp;

	of_aliases = of_find_node_by_path("/aliases");
	of_chosen = of_find_node_by_path("/chosen");
	if (of_chosen == NULL)
		of_chosen = of_find_node_by_path("/chosen@0");

	if (of_chosen) {
		/* linux,stdout-path and /aliases/stdout are for legacy compatibility */
		const char *name = NULL;

		if (of_property_read_string(of_chosen, "stdout-path", &name))
			of_property_read_string(of_chosen, "linux,stdout-path",
						&name);
		if (IS_ENABLED(CONFIG_PPC) && !name)
			of_property_read_string(of_aliases, "stdout", &name);
		if (name)
			of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
	}

	if (!of_aliases)
		return;

	for_each_property_of_node(of_aliases, pp) {
		const char *start = pp->name;
		const char *end = start + strlen(start);
		struct device_node *np;
		struct alias_prop *ap;
		int id, len;

		/* Skip those we do not want to process */
		if (!strcmp(pp->name, "name") ||
		    !strcmp(pp->name, "phandle") ||
		    !strcmp(pp->name, "linux,phandle"))
			continue;

		np = of_find_node_by_path(pp->value);
		if (!np)
			continue;

		/* walk the alias backwards to extract the id and work out
		 * the 'stem' string */
		while (isdigit(*(end-1)) && end > start)
			end--;
		len = end - start;

		if (kstrtoint(end, 10, &id) < 0)
			continue;

		/* Allocate an alias_prop with enough space for the stem */
		ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap));
		if (!ap)
			continue;
		memset(ap, 0, sizeof(*ap) + len + 1);
		ap->alias = start;
		of_alias_add(ap, np, id, start, len);
	}
}

/**
 * of_alias_get_id - Get alias id for the given device_node
 * @np:		Pointer to the given device_node
 * @stem:	Alias stem of the given device_node
 *
 * The function traverses the lookup table to get the alias id for the given
 * device_node and alias stem. It returns the alias id if found.
 */
int of_alias_get_id(struct device_node *np, const char *stem)
{
	struct alias_prop *app;
	int id = -ENODEV;

	mutex_lock(&of_mutex);
	list_for_each_entry(app, &aliases_lookup, link) {
		if (strcmp(app->stem, stem) != 0)
			continue;

		if (np == app->np) {
			id = app->id;
			break;
		}
	}
	mutex_unlock(&of_mutex);

	return id;
}
EXPORT_SYMBOL_GPL(of_alias_get_id);
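
/*
 * Example (illustrative sketch): derive a stable device index from an
 * /aliases entry such as 'serial2 = &uart2;'. The "serial" stem is the
 * conventional one for UARTs, but any stem works the same way.
 */
static void __maybe_unused example_alias_index(struct device_node *np)
{
	int id = of_alias_get_id(np, "serial");

	if (id >= 0)
		pr_info("%pOF is serial%d\n", np, id);
}
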
/**
 * of_alias_get_highest_id - Get highest alias id for the given stem
 * @stem:	Alias stem to be examined
 *
 * The function traverses the lookup table to get the highest alias id for the
 * given alias stem. It returns the alias id if found.
 */
int of_alias_get_highest_id(const char *stem)
{
	struct alias_prop *app;
	int id = -ENODEV;

	mutex_lock(&of_mutex);
	list_for_each_entry(app, &aliases_lookup, link) {
		if (strcmp(app->stem, stem) != 0)
			continue;

		if (app->id > id)
			id = app->id;
	}
	mutex_unlock(&of_mutex);

	return id;
}
EXPORT_SYMBOL_GPL(of_alias_get_highest_id);

/**
 * of_console_check() - Test and setup console for DT setup
 * @dn - Pointer to device node
 * @name - Name to use for preferred console without index. ex. "ttyS"
 * @index - Index to use for preferred console.
 *
 * Check if the given device node matches the stdout-path property in the
 * /chosen node. If it does then register it as the preferred console and return
 * TRUE. Otherwise return FALSE.
 */
bool of_console_check(struct device_node *dn, char *name, int index)
{
	if (!dn || dn != of_stdout || console_set_on_cmdline)
		return false;

	/*
	 * XXX: cast `options' to char pointer to suppress compilation
	 * warnings: printk, UART and console drivers expect char pointer.
	 */
	return !add_preferred_console(name, index, (char *)of_stdout_options);
}
EXPORT_SYMBOL_GPL(of_console_check);

/**
 * of_find_next_cache_node - Find a node's subsidiary cache
 * @np:	node of type "cpu" or "cache"
 *
 * Returns a node pointer with refcount incremented, use
 * of_node_put() on it when done. Caller should hold a reference
 * to np.
 */
struct device_node *of_find_next_cache_node(const struct device_node *np)
{
	struct device_node *child, *cache_node;

	cache_node = of_parse_phandle(np, "l2-cache", 0);
	if (!cache_node)
		cache_node = of_parse_phandle(np, "next-level-cache", 0);

	if (cache_node)
		return cache_node;

	/* OF on pmac has nodes instead of properties named "l2-cache"
	 * beneath CPU nodes.
	 */
	if (!strcmp(np->type, "cpu"))
		for_each_child_of_node(np, child)
			if (!strcmp(child->type, "cache"))
				return child;

	return NULL;
}

/**
 * of_find_last_cache_level - Find the level at which the last cache is
 * 		present for the given logical cpu
 *
 * @cpu: cpu number(logical index) for which the last cache level is needed
 *
 * Returns the level at which the last cache is present. It is exactly the
 * same as the total number of cache levels for the given logical cpu.
 */
int of_find_last_cache_level(unsigned int cpu)
{
	u32 cache_level = 0;
	struct device_node *prev = NULL, *np = of_cpu_device_node_get(cpu);

	while (np) {
		prev = np;
		of_node_put(np);
		np = of_find_next_cache_node(np);
	}

	of_property_read_u32(prev, "cache-level", &cache_level);

	return cache_level;
}
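
/*
 * Example (illustrative sketch): report the cache hierarchy depth the device
 * tree describes for the boot CPU.
 */
static void __maybe_unused example_report_cache_depth(void)
{
	pr_info("cpu0 has %d cache level(s) described in the device tree\n",
		of_find_last_cache_level(0));
}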