/*
 * Procedures for creating, accessing and interpreting the device tree.
 *
 * Paul Mackerras	August 1996.
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
 *   {engebret|bergner}@us.ibm.com
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <stdarg.h>
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/stringify.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/kexec.h>

#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/lmb.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/pci.h>
#include <asm/iommu.h>
#include <asm/btext.h>
#include <asm/sections.h>
#include <asm/machdep.h>
#include <asm/pSeries_reconfig.h>
#include <asm/pci-bridge.h>

#ifdef DEBUG
#define DBG(fmt...) printk(KERN_ERR fmt)
#else
#define DBG(fmt...)
#endif


static int __initdata dt_root_addr_cells;
static int __initdata dt_root_size_cells;

#ifdef CONFIG_PPC64
static int __initdata iommu_is_off;
int __initdata iommu_force_on;
unsigned long tce_alloc_start, tce_alloc_end;
#endif

typedef u32 cell_t;

#if 0
static struct boot_param_header *initial_boot_params __initdata;
#else
struct boot_param_header *initial_boot_params;
#endif

static struct device_node *allnodes = NULL;

/* use when traversing tree through the allnext, child, sibling,
 * or parent members of struct device_node.
 */
static DEFINE_RWLOCK(devtree_lock);

/* export that to outside world */
struct device_node *of_chosen;

struct device_node *dflt_interrupt_controller;
int num_interrupt_controllers;

/*
 * Wrapper for allocating memory for various data that needs to be
 * attached to device nodes as they are processed at boot or when
 * added to the device tree later (e.g. DLPAR).  At boot there is
 * already a region reserved so we just increment *mem_start by size;
 * otherwise we call kmalloc.
 */
static void * prom_alloc(unsigned long size, unsigned long *mem_start)
{
	unsigned long tmp;

	if (!mem_start)
		return kmalloc(size, GFP_KERNEL);

	tmp = *mem_start;
	*mem_start += size;
	return (void *)tmp;
}

/*
 * Find the device_node with a given phandle.
 */
static struct device_node * find_phandle(phandle ph)
{
	struct device_node *np;

	for (np = allnodes; np != 0; np = np->allnext)
		if (np->linux_phandle == ph)
			return np;
	return NULL;
}

/*
 * Find the interrupt parent of a node.
 */
static struct device_node * __devinit intr_parent(struct device_node *p)
{
	phandle *parp;

	parp = (phandle *) get_property(p, "interrupt-parent", NULL);
	if (parp == NULL)
		return p->parent;
	p = find_phandle(*parp);
	if (p != NULL)
		return p;
	/*
	 * On a powermac booted with BootX, we don't get to know the
	 * phandles for any nodes, so find_phandle will return NULL.
	 * Fortunately these machines only have one interrupt controller
	 * so there isn't in fact any ambiguity.  -- paulus
	 */
	if (num_interrupt_controllers == 1)
		p = dflt_interrupt_controller;
	return p;
}

/*
 * Find out the size of each entry of the interrupts property
 * for a node.
 */
int __devinit prom_n_intr_cells(struct device_node *np)
{
	struct device_node *p;
	unsigned int *icp;

	for (p = np; (p = intr_parent(p)) != NULL; ) {
		icp = (unsigned int *)
			get_property(p, "#interrupt-cells", NULL);
		if (icp != NULL)
			return *icp;
		if (get_property(p, "interrupt-controller", NULL) != NULL
		    || get_property(p, "interrupt-map", NULL) != NULL) {
			printk("oops, node %s doesn't have #interrupt-cells\n",
			       p->full_name);
			return 1;
		}
	}
#ifdef DEBUG_IRQ
	printk("prom_n_intr_cells failed for %s\n", np->full_name);
#endif
	return 1;
}

/*
 * Map an interrupt from a device up to the platform interrupt
 * descriptor.
 */
static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler,
				   struct device_node *np, unsigned int *ints,
				   int nintrc)
{
	struct device_node *p, *ipar;
	unsigned int *imap, *imask, *ip;
	int i, imaplen, match;
	int newintrc = 0, newaddrc = 0;
	unsigned int *reg;
	int naddrc;

	reg = (unsigned int *) get_property(np, "reg", NULL);
	naddrc = prom_n_addr_cells(np);
	p = intr_parent(np);
	while (p != NULL) {
		if (get_property(p, "interrupt-controller", NULL) != NULL)
			/* this node is an interrupt controller, stop here */
			break;
		imap = (unsigned int *)
			get_property(p, "interrupt-map", &imaplen);
		if (imap == NULL) {
			p = intr_parent(p);
			continue;
		}
		imask = (unsigned int *)
			get_property(p, "interrupt-map-mask", NULL);
		if (imask == NULL) {
			printk("oops, %s has interrupt-map but no mask\n",
			       p->full_name);
			return 0;
		}
		imaplen /= sizeof(unsigned int);
		match = 0;
		ipar = NULL;
		while (imaplen > 0 && !match) {
			/* check the child-interrupt field */
			match = 1;
			for (i = 0; i < naddrc && match; ++i)
				match = ((reg[i] ^ imap[i]) & imask[i]) == 0;
			for (; i < naddrc + nintrc && match; ++i)
				match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0;
			imap += naddrc + nintrc;
			imaplen -= naddrc + nintrc;
			/* grab the interrupt parent */
			ipar = find_phandle((phandle) *imap++);
			--imaplen;
			if (ipar == NULL && num_interrupt_controllers == 1)
				/* cope with BootX not giving us phandles */
				ipar = dflt_interrupt_controller;
			if (ipar == NULL) {
				printk("oops, no int parent %x in map of %s\n",
				       imap[-1], p->full_name);
				return 0;
			}
			/* find the parent's # addr and intr cells */
			ip = (unsigned int *)
				get_property(ipar, "#interrupt-cells", NULL);
			if (ip == NULL) {
				printk("oops, no #interrupt-cells on %s\n",
				       ipar->full_name);
				return 0;
			}
			newintrc = *ip;
			ip = (unsigned int *)
				get_property(ipar, "#address-cells", NULL);
			newaddrc = (ip == NULL)? 0: *ip;
			imap += newaddrc + newintrc;
			imaplen -= newaddrc + newintrc;
		}
		if (imaplen < 0) {
			printk("oops, error decoding int-map on %s, len=%d\n",
			       p->full_name, imaplen);
			return 0;
		}
		if (!match) {
#ifdef DEBUG_IRQ
			printk("oops, no match in %s int-map for %s\n",
			       p->full_name, np->full_name);
#endif
			return 0;
		}
		p = ipar;
		naddrc = newaddrc;
		nintrc = newintrc;
		ints = imap - nintrc;
		reg = ints - naddrc;
	}
	if (p == NULL) {
#ifdef DEBUG_IRQ
		printk("hmmm, int tree for %s doesn't have ctrler\n",
		       np->full_name);
#endif
		return 0;
	}
	*irq = ints;
	*ictrler = p;
	return nintrc;
}

static unsigned char map_isa_senses[4] = {
	IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
	IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
	IRQ_SENSE_EDGE  | IRQ_POLARITY_NEGATIVE,
	IRQ_SENSE_EDGE  | IRQ_POLARITY_POSITIVE
};

static unsigned char map_mpic_senses[4] = {
	IRQ_SENSE_EDGE  | IRQ_POLARITY_POSITIVE,
	IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
	/* 2 seems to be used for the 8259 cascade... */
	IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
	IRQ_SENSE_EDGE  | IRQ_POLARITY_NEGATIVE,
};

static int __devinit finish_node_interrupts(struct device_node *np,
					    unsigned long *mem_start,
					    int measure_only)
{
	unsigned int *ints;
	int intlen, intrcells, intrcount;
	int i, j, n, sense;
	unsigned int *irq, virq;
	struct device_node *ic;
	int trace = 0;

	//#define TRACE(fmt...) do { if (trace) { printk(fmt); mdelay(1000); } } while(0)
#define TRACE(fmt...)

	if (!strcmp(np->name, "smu-doorbell"))
		trace = 1;

	TRACE("Finishing SMU doorbell ! num_interrupt_controllers = %d\n",
	      num_interrupt_controllers);

	if (num_interrupt_controllers == 0) {
		/*
		 * Old machines just have a list of interrupt numbers
		 * and no interrupt-controller nodes.
		 */
		ints = (unsigned int *) get_property(np, "AAPL,interrupts",
						     &intlen);
		/* XXX old interpret_pci_props looked in parent too */
		/* XXX old interpret_macio_props looked for interrupts
		   before AAPL,interrupts */
		if (ints == NULL)
			ints = (unsigned int *) get_property(np, "interrupts",
							     &intlen);
		if (ints == NULL)
			return 0;

		np->n_intrs = intlen / sizeof(unsigned int);
		np->intrs = prom_alloc(np->n_intrs * sizeof(np->intrs[0]),
				       mem_start);
		if (!np->intrs)
			return -ENOMEM;
		if (measure_only)
			return 0;

		for (i = 0; i < np->n_intrs; ++i) {
			np->intrs[i].line = *ints++;
			np->intrs[i].sense = IRQ_SENSE_LEVEL
				| IRQ_POLARITY_NEGATIVE;
		}
		return 0;
	}

	ints = (unsigned int *) get_property(np, "interrupts", &intlen);
	TRACE("ints=%p, intlen=%d\n", ints, intlen);
	if (ints == NULL)
		return 0;
	intrcells = prom_n_intr_cells(np);
	intlen /= intrcells * sizeof(unsigned int);
	TRACE("intrcells=%d, new intlen=%d\n", intrcells, intlen);
	np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start);
	if (!np->intrs)
		return -ENOMEM;

	if (measure_only)
		return 0;

	intrcount = 0;
	for (i = 0; i < intlen; ++i, ints += intrcells) {
		n = map_interrupt(&irq, &ic, np, ints, intrcells);
		TRACE("map, irq=%d, ic=%p, n=%d\n", irq, ic, n);
		if (n <= 0)
			continue;

		/* don't map IRQ numbers under a cascaded 8259 controller */
		if (ic && device_is_compatible(ic, "chrp,iic")) {
			np->intrs[intrcount].line = irq[0];
			sense = (n > 1)? (irq[1] & 3): 3;
			np->intrs[intrcount].sense = map_isa_senses[sense];
		} else {
			virq = virt_irq_create_mapping(irq[0]);
			TRACE("virq=%d\n", virq);
#ifdef CONFIG_PPC64
			if (virq == NO_IRQ) {
				printk(KERN_CRIT "Could not allocate interrupt"
				       " number for %s\n", np->full_name);
				continue;
			}
#endif
			np->intrs[intrcount].line = irq_offset_up(virq);
			sense = (n > 1)? (irq[1] & 3): 1;

			/* Apple uses bits in there in a different way, let's
			 * only keep the real sense bit on macs
			 */
			if (_machine == PLATFORM_POWERMAC)
				sense &= 0x1;
			np->intrs[intrcount].sense = map_mpic_senses[sense];
		}

#ifdef CONFIG_PPC64
		/* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
		if (_machine == PLATFORM_POWERMAC && ic && ic->parent) {
			char *name = get_property(ic->parent, "name", NULL);
			if (name && !strcmp(name, "u3"))
				np->intrs[intrcount].line += 128;
			else if (!(name && (!strcmp(name, "mac-io") ||
					    !strcmp(name, "u4"))))
				/* ignore other cascaded controllers, such as
				   the k2-sata-root */
				break;
		}
#endif /* CONFIG_PPC64 */
		if (n > 2) {
			printk("hmmm, got %d intr cells for %s:", n,
			       np->full_name);
			for (j = 0; j < n; ++j)
				printk(" %d", irq[j]);
			printk("\n");
		}
		++intrcount;
	}
	np->n_intrs = intrcount;

	return 0;
}

static int __devinit finish_node(struct device_node *np,
				 unsigned long *mem_start,
				 int measure_only)
{
	struct device_node *child;
	int rc = 0;

	rc = finish_node_interrupts(np, mem_start, measure_only);
	if (rc)
		goto out;

	for (child = np->child; child != NULL; child = child->sibling) {
		rc = finish_node(child, mem_start, measure_only);
		if (rc)
			goto out;
	}
out:
	return rc;
}

static void __init scan_interrupt_controllers(void)
{
	struct device_node *np;
	int n = 0;
	char *name, *ic;
	int iclen;

	for (np = allnodes; np != NULL; np = np->allnext) {
		ic = get_property(np, "interrupt-controller", &iclen);
		name = get_property(np, "name", NULL);
		/* checking iclen makes sure we don't get a false
		   match on /chosen.interrupt_controller */
		if ((name != NULL
		     && strcmp(name, "interrupt-controller") == 0)
		    || (ic != NULL && iclen == 0
			&& strcmp(name, "AppleKiwi"))) {
			if (n == 0)
				dflt_interrupt_controller = np;
			++n;
		}
	}
	num_interrupt_controllers = n;
}

/**
 * finish_device_tree is called once things are running normally
 * (i.e. with text and data mapped to the address they were linked at).
 * It traverses the device tree and fills in some of the additional
 * fields in each node, like {n_}addrs and {n_}intrs; the virtual
 * interrupt mapping is also initialized at this point.
 */
void __init finish_device_tree(void)
{
	unsigned long start, end, size = 0;

	DBG(" -> finish_device_tree\n");

#ifdef CONFIG_PPC64
	/* Initialize virtual IRQ map */
	virt_irq_init();
#endif
	scan_interrupt_controllers();

	/*
	 * Finish device-tree (pre-parsing some properties etc...)
	 * We do this in 2 passes.  One with "measure_only" set, which
	 * will only measure the amount of memory needed, then we can
	 * allocate that memory, and call finish_node again.  However,
	 * we must be careful as most routines will fail nowadays when
	 * prom_alloc() returns 0, so we must make sure our first pass
	 * doesn't start at 0.
	 * We pre-initialize size to 16 for that reason and then remove
	 * those additional 16 bytes.
	 */
	size = 16;
	finish_node(allnodes, &size, 1);
	size -= 16;

	if (0 == size)
		end = start = 0;
	else
		end = start = (unsigned long)__va(lmb_alloc(size, 128));

	finish_node(allnodes, &end, 0);
	BUG_ON(end != start + size);

	DBG(" <- finish_device_tree\n");
}

static inline char *find_flat_dt_string(u32 offset)
{
	return ((char *)initial_boot_params) +
		initial_boot_params->off_dt_strings + offset;
}

/**
 * This function is used to scan the flattened device-tree; it is
 * used to extract the memory information at boot before we can
 * unflatten the tree.
 */
int __init of_scan_flat_dt(int (*it)(unsigned long node,
				     const char *uname, int depth,
				     void *data),
			   void *data)
{
	unsigned long p = ((unsigned long)initial_boot_params) +
		initial_boot_params->off_dt_struct;
	int rc = 0;
	int depth = -1;

	do {
		u32 tag = *((u32 *)p);
		char *pathp;

		p += 4;
		if (tag == OF_DT_END_NODE) {
			depth --;
			continue;
		}
		if (tag == OF_DT_NOP)
			continue;
		if (tag == OF_DT_END)
			break;
		if (tag == OF_DT_PROP) {
			u32 sz = *((u32 *)p);
			p += 8;
			if (initial_boot_params->version < 0x10)
				p = _ALIGN(p, sz >= 8 ? 8 : 4);
			p += sz;
			p = _ALIGN(p, 4);
			continue;
		}
		if (tag != OF_DT_BEGIN_NODE) {
			printk(KERN_WARNING "Invalid tag %x scanning flattened"
			       " device tree !\n", tag);
			return -EINVAL;
		}
		depth++;
		pathp = (char *)p;
		p = _ALIGN(p + strlen(pathp) + 1, 4);
		if ((*pathp) == '/') {
			char *lp, *np;
			for (lp = NULL, np = pathp; *np; np++)
				if ((*np) == '/')
					lp = np+1;
			if (lp != NULL)
				pathp = lp;
		}
		rc = it(p, pathp, depth, data);
		if (rc != 0)
			break;
	} while(1);

	return rc;
}
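/*
 * Illustrative sketch only (kept out of the build): a minimal
 * of_scan_flat_dt() callback that checks a flat-tree property with
 * of_get_flat_dt_prop().  The callback name below is made up; the real
 * callbacks in this file (early_init_dt_scan_chosen() etc.) follow the
 * same shape.
 */
#if 0
static int __init example_dt_scan(unsigned long node, const char *uname,
				  int depth, void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;	/* keep scanning */
	/* found a memory node; returning non-zero stops the scan */
	return 1;
}

/* would be called early in boot as: of_scan_flat_dt(example_dt_scan, NULL); */
#endif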
/**
 * This function can be used within the of_scan_flat_dt() callbacks to
 * get access to properties.
 */
void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
				 unsigned long *size)
{
	unsigned long p = node;

	do {
		u32 tag = *((u32 *)p);
		u32 sz, noff;
		const char *nstr;

		p += 4;
		if (tag == OF_DT_NOP)
			continue;
		if (tag != OF_DT_PROP)
			return NULL;

		sz = *((u32 *)p);
		noff = *((u32 *)(p + 4));
		p += 8;
		if (initial_boot_params->version < 0x10)
			p = _ALIGN(p, sz >= 8 ? 8 : 4);

		nstr = find_flat_dt_string(noff);
		if (nstr == NULL) {
			printk(KERN_WARNING "Can't find property index"
			       " name !\n");
			return NULL;
		}
		if (strcmp(name, nstr) == 0) {
			if (size)
				*size = sz;
			return (void *)p;
		}
		p += sz;
		p = _ALIGN(p, 4);
	} while(1);
}

static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
				       unsigned long align)
{
	void *res;

	*mem = _ALIGN(*mem, align);
	res = (void *)*mem;
	*mem += size;

	return res;
}

static unsigned long __init unflatten_dt_node(unsigned long mem,
					      unsigned long *p,
					      struct device_node *dad,
					      struct device_node ***allnextpp,
					      unsigned long fpsize)
{
	struct device_node *np;
	struct property *pp, **prev_pp = NULL;
	char *pathp;
	u32 tag;
	unsigned int l, allocl;
	int has_name = 0;
	int new_format = 0;

	tag = *((u32 *)(*p));
	if (tag != OF_DT_BEGIN_NODE) {
		printk("Weird tag at start of node: %x\n", tag);
		return mem;
	}
	*p += 4;
	pathp = (char *)*p;
	l = allocl = strlen(pathp) + 1;
	*p = _ALIGN(*p + l, 4);

	/* Version 0x10 has a more compact unit name here instead of the
	 * full path.  We accumulate the full path size using "fpsize" and
	 * rebuild it later.  We detect this because the first character
	 * of the name is not '/'.
	 */
	if ((*pathp) != '/') {
		new_format = 1;
		if (fpsize == 0) {
			/* root node: special case.  fpsize accounts for the
			 * path plus the terminating zero.  The root node only
			 * has '/', so fpsize should be 2, but we don't want
			 * first-level nodes to end up with two '/', so we use
			 * fpsize 1 here.
			 */
			fpsize = 1;
			allocl = 2;
		} else {
			/* account for '/' and the path size minus the
			 * terminating 0 already in 'l'
			 */
			fpsize += l;
			allocl = fpsize;
		}
	}


	np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
				__alignof__(struct device_node));
	if (allnextpp) {
		memset(np, 0, sizeof(*np));
		np->full_name = ((char*)np) + sizeof(struct device_node);
		if (new_format) {
			char *p = np->full_name;
			/* rebuild full path for new format */
			if (dad && dad->parent) {
				strcpy(p, dad->full_name);
#ifdef DEBUG
				if ((strlen(p) + l + 1) != allocl) {
					DBG("%s: p: %d, l: %d, a: %d\n",
					    pathp, strlen(p), l, allocl);
				}
#endif
				p += strlen(p);
			}
			*(p++) = '/';
			memcpy(p, pathp, l);
		} else
			memcpy(np->full_name, pathp, l);
		prev_pp = &np->properties;
		**allnextpp = np;
		*allnextpp = &np->allnext;
		if (dad != NULL) {
			np->parent = dad;
			/* we temporarily use the next field as `last_child'*/
			if (dad->next == 0)
				dad->child = np;
			else
				dad->next->sibling = np;
			dad->next = np;
		}
		kref_init(&np->kref);
	}
	while(1) {
		u32 sz, noff;
		char *pname;

		tag = *((u32 *)(*p));
		if (tag == OF_DT_NOP) {
			*p += 4;
			continue;
		}
		if (tag != OF_DT_PROP)
			break;
		*p += 4;
		sz = *((u32 *)(*p));
		noff = *((u32 *)((*p) + 4));
		*p += 8;
		if (initial_boot_params->version < 0x10)
			*p = _ALIGN(*p, sz >= 8 ? 8 : 4);

		pname = find_flat_dt_string(noff);
		if (pname == NULL) {
			printk("Can't find property name in list !\n");
			break;
		}
		if (strcmp(pname, "name") == 0)
			has_name = 1;
		l = strlen(pname) + 1;
		pp = unflatten_dt_alloc(&mem, sizeof(struct property),
					__alignof__(struct property));
		if (allnextpp) {
			if (strcmp(pname, "linux,phandle") == 0) {
				np->node = *((u32 *)*p);
				if (np->linux_phandle == 0)
					np->linux_phandle = np->node;
			}
			if (strcmp(pname, "ibm,phandle") == 0)
				np->linux_phandle = *((u32 *)*p);
			pp->name = pname;
			pp->length = sz;
			pp->value = (void *)*p;
			*prev_pp = pp;
			prev_pp = &pp->next;
		}
		*p = _ALIGN((*p) + sz, 4);
	}
	/* with version 0x10 we may not have the name property, recreate
	 * it here from the unit name if absent
	 */
	if (!has_name) {
		char *p = pathp, *ps = pathp, *pa = NULL;
		int sz;

		while (*p) {
			if ((*p) == '@')
				pa = p;
			if ((*p) == '/')
				ps = p + 1;
			p++;
		}
		if (pa < ps)
			pa = p;
		sz = (pa - ps) + 1;
		pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
					__alignof__(struct property));
		if (allnextpp) {
			pp->name = "name";
			pp->length = sz;
			pp->value = (unsigned char *)(pp + 1);
			*prev_pp = pp;
			prev_pp = &pp->next;
			memcpy(pp->value, ps, sz - 1);
			((char *)pp->value)[sz - 1] = 0;
			DBG("fixed up name for %s -> %s\n", pathp, pp->value);
		}
	}
	if (allnextpp) {
		*prev_pp = NULL;
		np->name = get_property(np, "name", NULL);
		np->type = get_property(np, "device_type", NULL);

		if (!np->name)
			np->name = "<NULL>";
		if (!np->type)
			np->type = "<NULL>";
	}
	while (tag == OF_DT_BEGIN_NODE) {
		mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
		tag = *((u32 *)(*p));
	}
	if (tag != OF_DT_END_NODE) {
		printk("Weird tag at end of node: %x\n", tag);
		return mem;
	}
	*p += 4;
	return mem;
}


/**
 * unflattens the device-tree passed by the firmware, creating the
 * tree of struct device_node.
 * It also fills the "name" and "type" pointers of the nodes so the
 * normal device-tree walking functions can be used (this used to be
 * done by finish_device_tree).
 */
void __init unflatten_device_tree(void)
{
	unsigned long start, mem, size;
	struct device_node **allnextp = &allnodes;

	DBG(" -> unflatten_device_tree()\n");

	/* First pass, scan for size */
	start = ((unsigned long)initial_boot_params) +
		initial_boot_params->off_dt_struct;
	size = unflatten_dt_node(0, &start, NULL, NULL, 0);
	size = (size | 3) + 1;

	DBG(" size is %lx, allocating...\n", size);

	/* Allocate memory for the expanded device tree */
	mem = lmb_alloc(size + 4, __alignof__(struct device_node));
	if (!mem) {
		DBG("Couldn't allocate memory with lmb_alloc()!\n");
		panic("Couldn't allocate memory with lmb_alloc()!\n");
	}
	mem = (unsigned long) __va(mem);

	((u32 *)mem)[size / 4] = 0xdeadbeef;

	DBG(" unflattening %lx...\n", mem);

	/* Second pass, do actual unflattening */
	start = ((unsigned long)initial_boot_params) +
		initial_boot_params->off_dt_struct;
	unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
	if (*((u32 *)start) != OF_DT_END)
		printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
	if (((u32 *)mem)[size / 4] != 0xdeadbeef)
		printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
		       ((u32 *)mem)[size / 4] );
	*allnextp = NULL;

	/* Get pointer to OF "/chosen" node for use everywhere */
	of_chosen = of_find_node_by_path("/chosen");
	if (of_chosen == NULL)
		of_chosen = of_find_node_by_path("/chosen@0");

	DBG(" <- unflatten_device_tree()\n");
}


static int __init early_init_dt_scan_cpus(unsigned long node,
					  const char *uname, int depth, void *data)
{
	u32 *prop;
	unsigned long size;
	char *type = of_get_flat_dt_prop(node, "device_type", &size);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	boot_cpuid = 0;
	boot_cpuid_phys = 0;
	if (initial_boot_params && initial_boot_params->version >= 2) {
		/* version 2 of the kexec param format adds the phys cpuid
		 * of the booted proc.
		 */
		boot_cpuid_phys = initial_boot_params->boot_cpuid_phys;
	} else {
		/* Check if it's the boot-cpu and set its hw index now */
		if (of_get_flat_dt_prop(node,
					"linux,boot-cpu", NULL) != NULL) {
			prop = of_get_flat_dt_prop(node, "reg", NULL);
			if (prop != NULL)
				boot_cpuid_phys = *prop;
		}
	}
	set_hard_smp_processor_id(0, boot_cpuid_phys);

#ifdef CONFIG_ALTIVEC
	/* Check if we have a VMX unit and, if so, update the CPU features */
	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL);
	if (prop && (*prop) > 0) {
		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
	}

	/* Same goes for Apple's "altivec" property */
	prop = (u32 *)of_get_flat_dt_prop(node, "altivec", NULL);
	if (prop) {
		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
	}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_PPC_PSERIES
	/*
	 * Check for an SMT capable CPU and set the CPU feature.
	 * We do this by looking at the size of the
	 * ibm,ppc-interrupt-server#s property.
	 */
	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
					  &size);
	cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
	if (prop && ((size / sizeof(u32)) > 1))
		cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
#endif

	return 0;
}

static int __init early_init_dt_scan_chosen(unsigned long node,
					    const char *uname, int depth, void *data)
{
	u32 *prop;
	unsigned long *lprop;
	unsigned long l;
	char *p;

	DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);

	if (depth != 1 ||
	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
		return 0;

	/* get platform type */
	prop = (u32 *)of_get_flat_dt_prop(node, "linux,platform", NULL);
	if (prop == NULL)
		return 0;
#ifdef CONFIG_PPC_MULTIPLATFORM
	_machine = *prop;
#endif

#ifdef CONFIG_PPC64
	/* check if iommu is forced on or off */
	if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
		iommu_is_off = 1;
	if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
		iommu_force_on = 1;
#endif

	lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
	if (lprop)
		memory_limit = *lprop;

#ifdef CONFIG_PPC64
	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
	if (lprop)
		tce_alloc_start = *lprop;
	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
	if (lprop)
		tce_alloc_end = *lprop;
#endif

#ifdef CONFIG_PPC_RTAS
	/* To help early debugging via the front panel, we retrieve a minimal
	 * set of RTAS information now if it is available.
	 */
	{
		u64 *basep, *entryp;

		basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL);
		entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
		prop = of_get_flat_dt_prop(node, "linux,rtas-size", NULL);
		if (basep && entryp && prop) {
			rtas.base = *basep;
			rtas.entry = *entryp;
			rtas.size = *prop;
		}
	}
#endif /* CONFIG_PPC_RTAS */

#ifdef CONFIG_KEXEC
	lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
	if (lprop)
		crashk_res.start = *lprop;

	lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
	if (lprop)
		crashk_res.end = crashk_res.start + *lprop - 1;
#endif

	/* Retrieve command line */
	p = of_get_flat_dt_prop(node, "bootargs", &l);
	if (p != NULL && l > 0)
		strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE));

#ifdef CONFIG_CMDLINE
	if (l == 0 || (l == 1 && (*p) == 0))
		strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif /* CONFIG_CMDLINE */

	DBG("Command line is: %s\n", cmd_line);

	if (strstr(cmd_line, "mem=")) {
		char *p, *q;
		unsigned long maxmem = 0;

		for (q = cmd_line; (p = strstr(q, "mem=")) != 0; ) {
			q = p + 4;
			if (p > cmd_line && p[-1] != ' ')
				continue;
			maxmem = simple_strtoul(q, &q, 0);
			if (*q == 'k' || *q == 'K') {
				maxmem <<= 10;
				++q;
			} else if (*q == 'm' || *q == 'M') {
				maxmem <<= 20;
				++q;
			} else if (*q == 'g' || *q == 'G') {
				maxmem <<= 30;
				++q;
			}
		}
		memory_limit = maxmem;
	}

	/* break now */
	return 1;
}

static int __init early_init_dt_scan_root(unsigned long node,
					  const char *uname, int depth, void *data)
{
	u32 *prop;

	if (depth != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
	dt_root_size_cells = (prop == NULL) ? 1 : *prop;
	DBG("dt_root_size_cells = %x\n", dt_root_size_cells);

	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
	dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
	DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);

	/* break now */
	return 1;
}

static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
{
	cell_t *p = *cellp;
	unsigned long r;

	/* Ignore more than 2 cells */
	while (s > sizeof(unsigned long) / 4) {
		p++;
		s--;
	}
	r = *p++;
#ifdef CONFIG_PPC64
	if (s > 1) {
		r <<= 32;
		r |= *(p++);
		s--;
	}
#endif

	*cellp = p;
	return r;
}


static int __init early_init_dt_scan_memory(unsigned long node,
					    const char *uname, int depth, void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	cell_t *reg, *endp;
	unsigned long l;

	/* We are scanning "memory" nodes only */
	if (type == NULL) {
		/*
		 * The longtrail doesn't have a device_type on the
		 * /memory node, so look for the node called /memory@0.
		 */
		if (depth != 1 || strcmp(uname, "memory@0") != 0)
			return 0;
	} else if (strcmp(type, "memory") != 0)
		return 0;

	reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l);
	if (reg == NULL)
		reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
	if (reg == NULL)
		return 0;

	endp = reg + (l / sizeof(cell_t));

	DBG("memory scan node %s, reg size %ld, data: %x %x %x %x,\n",
	    uname, l, reg[0], reg[1], reg[2], reg[3]);

	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
		unsigned long base, size;

		base = dt_mem_next_cell(dt_root_addr_cells, &reg);
		size = dt_mem_next_cell(dt_root_size_cells, &reg);

		if (size == 0)
			continue;
		DBG(" - %lx , %lx\n", base, size);
#ifdef CONFIG_PPC64
		if (iommu_is_off) {
			if (base >= 0x80000000ul)
				continue;
			if ((base + size) > 0x80000000ul)
				size = 0x80000000ul - base;
		}
#endif
		lmb_add(base, size);
	}
	return 0;
}

static void __init early_reserve_mem(void)
{
	u64 base, size;
	u64 *reserve_map;

	reserve_map = (u64 *)(((unsigned long)initial_boot_params) +
			      initial_boot_params->off_mem_rsvmap);
#ifdef CONFIG_PPC32
	/*
	 * Handle the case where we might be booting from an old kexec
	 * image that set up the mem_rsvmap as pairs of 32-bit values
	 */
	if (*reserve_map > 0xffffffffull) {
		u32 base_32, size_32;
		u32 *reserve_map_32 = (u32 *)reserve_map;

		while (1) {
			base_32 = *(reserve_map_32++);
			size_32 = *(reserve_map_32++);
			if (size_32 == 0)
				break;
			DBG("reserving: %x -> %x\n", base_32, size_32);
			lmb_reserve(base_32, size_32);
		}
		return;
	}
#endif
	while (1) {
		base = *(reserve_map++);
		size = *(reserve_map++);
		if (size == 0)
			break;
		DBG("reserving: %llx -> %llx\n", base, size);
		lmb_reserve(base, size);
	}

#if 0
	DBG("memory reserved, lmbs :\n");
	lmb_dump_all();
#endif
}

void __init early_init_devtree(void *params)
{
	DBG(" -> early_init_devtree()\n");

	/* Setup flat device-tree pointer */
	initial_boot_params = params;

	/* Retrieve various pieces of information from the /chosen node of
	 * the device-tree, including the platform type, initrd location
	 * and size, TCE reserve, and more ...
	 */
	of_scan_flat_dt(early_init_dt_scan_chosen, NULL);

	/* Scan memory nodes and rebuild LMBs */
	lmb_init();
	of_scan_flat_dt(early_init_dt_scan_root, NULL);
	of_scan_flat_dt(early_init_dt_scan_memory, NULL);
	lmb_enforce_memory_limit(memory_limit);
	lmb_analyze();

	DBG("Phys. mem: %lx\n", lmb_phys_mem_size());

	/* Reserve LMB regions used by kernel, initrd, dt, etc... */
	lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
#ifdef CONFIG_CRASH_DUMP
	lmb_reserve(0, KDUMP_RESERVE_LIMIT);
#endif
	early_reserve_mem();

	DBG("Scanning CPUs ...\n");

	/* Retrieve CPU-related information from the flat tree
	 * (altivec support, boot CPU ID, ...)
	 */
	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);

	DBG(" <- early_init_devtree()\n");
}

#undef printk

int
prom_n_addr_cells(struct device_node* np)
{
	int* ip;
	do {
		if (np->parent)
			np = np->parent;
		ip = (int *) get_property(np, "#address-cells", NULL);
		if (ip != NULL)
			return *ip;
	} while (np->parent);
	/* No #address-cells property for the root node, default to 1 */
	return 1;
}
EXPORT_SYMBOL(prom_n_addr_cells);

int
prom_n_size_cells(struct device_node* np)
{
	int* ip;
	do {
		if (np->parent)
			np = np->parent;
		ip = (int *) get_property(np, "#size-cells", NULL);
		if (ip != NULL)
			return *ip;
	} while (np->parent);
	/* No #size-cells property for the root node, default to 1 */
	return 1;
}
EXPORT_SYMBOL(prom_n_size_cells);
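/*
 * Illustrative sketch only (kept out of the build): decoding the first
 * "reg" entry of a node using prom_n_addr_cells()/prom_n_size_cells().
 * It assumes the usual 1- or 2-cell encodings; the helper name below is
 * made up.
 */
#if 0
static void example_read_first_reg(struct device_node *np)
{
	unsigned int *reg;
	int len, i;
	int na = prom_n_addr_cells(np), ns = prom_n_size_cells(np);
	u64 addr = 0, size = 0;

	reg = (unsigned int *) get_property(np, "reg", &len);
	if (reg == NULL || len < (na + ns) * (int)sizeof(unsigned int))
		return;
	for (i = 0; i < na; i++)
		addr = (addr << 32) | reg[i];
	for (i = 0; i < ns; i++)
		size = (size << 32) | reg[na + i];
	printk("%s: reg 0x%llx size 0x%llx\n", np->full_name,
	       (unsigned long long)addr, (unsigned long long)size);
}
#endif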
/**
 * Work out the sense (active-low level / active-high edge)
 * of each interrupt from the device tree.
 */
void __init prom_get_irq_senses(unsigned char *senses, int off, int max)
{
	struct device_node *np;
	int i, j;

	/* default to level-triggered */
	memset(senses, IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE, max - off);

	for (np = allnodes; np != 0; np = np->allnext) {
		for (j = 0; j < np->n_intrs; j++) {
			i = np->intrs[j].line;
			if (i >= off && i < max)
				senses[i-off] = np->intrs[j].sense;
		}
	}
}

/**
 * Construct and return a list of the device_nodes with a given name.
 */
struct device_node *find_devices(const char *name)
{
	struct device_node *head, **prevp, *np;

	prevp = &head;
	for (np = allnodes; np != 0; np = np->allnext) {
		if (np->name != 0 && strcasecmp(np->name, name) == 0) {
			*prevp = np;
			prevp = &np->next;
		}
	}
	*prevp = NULL;
	return head;
}
EXPORT_SYMBOL(find_devices);

/**
 * Construct and return a list of the device_nodes with a given type.
 */
struct device_node *find_type_devices(const char *type)
{
	struct device_node *head, **prevp, *np;

	prevp = &head;
	for (np = allnodes; np != 0; np = np->allnext) {
		if (np->type != 0 && strcasecmp(np->type, type) == 0) {
			*prevp = np;
			prevp = &np->next;
		}
	}
	*prevp = NULL;
	return head;
}
EXPORT_SYMBOL(find_type_devices);

/**
 * Returns all nodes linked together
 */
struct device_node *find_all_nodes(void)
{
	struct device_node *head, **prevp, *np;

	prevp = &head;
	for (np = allnodes; np != 0; np = np->allnext) {
		*prevp = np;
		prevp = &np->next;
	}
	*prevp = NULL;
	return head;
}
EXPORT_SYMBOL(find_all_nodes);

/** Checks if the given "compat" string matches one of the strings in
 * the device's "compatible" property
 */
int device_is_compatible(struct device_node *device, const char *compat)
{
	const char* cp;
	int cplen, l;

	cp = (char *) get_property(device, "compatible", &cplen);
	if (cp == NULL)
		return 0;
	while (cplen > 0) {
		if (strncasecmp(cp, compat, strlen(compat)) == 0)
			return 1;
		l = strlen(cp) + 1;
		cp += l;
		cplen -= l;
	}

	return 0;
}
EXPORT_SYMBOL(device_is_compatible);


/**
 * Indicates whether the root node has a given value in its
 * compatible property.
 */
int machine_is_compatible(const char *compat)
{
	struct device_node *root;
	int rc = 0;

	root = of_find_node_by_path("/");
	if (root) {
		rc = device_is_compatible(root, compat);
		of_node_put(root);
	}
	return rc;
}
EXPORT_SYMBOL(machine_is_compatible);

/**
 * Construct and return a list of the device_nodes with a given type
 * and compatible property.
 */
struct device_node *find_compatible_devices(const char *type,
					    const char *compat)
{
	struct device_node *head, **prevp, *np;

	prevp = &head;
	for (np = allnodes; np != 0; np = np->allnext) {
		if (type != NULL
		    && !(np->type != 0 && strcasecmp(np->type, type) == 0))
			continue;
		if (device_is_compatible(np, compat)) {
			*prevp = np;
			prevp = &np->next;
		}
	}
	*prevp = NULL;
	return head;
}
EXPORT_SYMBOL(find_compatible_devices);

/**
 * Find the device_node with a given full_name.
 */
struct device_node *find_path_device(const char *path)
{
	struct device_node *np;

	for (np = allnodes; np != 0; np = np->allnext)
		if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0)
			return np;
	return NULL;
}
EXPORT_SYMBOL(find_path_device);
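/*
 * Illustrative sketch only (kept out of the build): the find_*() helpers
 * above return a list of matching nodes chained through the ->next field
 * and take no references, so the list is simply walked.  The device name
 * used here is made up.
 */
#if 0
static void example_list_serial_nodes(void)
{
	struct device_node *np;

	for (np = find_devices("serial"); np != NULL; np = np->next)
		printk("serial node: %s\n", np->full_name);
}
#endif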
/*******
 *
 * New implementation of the OF "find" APIs, return a refcounted
 * object, call of_node_put() when done.  The device tree and list
 * are protected by a rw_lock.
 *
 * Note that property management will need some locking as well,
 * this isn't dealt with yet.
 *
 *******/

/**
 *	of_find_node_by_name - Find a node by its "name" property
 *	@from:	The node to start searching from or NULL, the node
 *		you pass will not be searched, only the next one
 *		will; typically, you pass what the previous call
 *		returned. of_node_put() will be called on it
 *	@name:	The name string to match against
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */
struct device_node *of_find_node_by_name(struct device_node *from,
	const char *name)
{
	struct device_node *np;

	read_lock(&devtree_lock);
	np = from ? from->allnext : allnodes;
	for (; np != NULL; np = np->allnext)
		if (np->name != NULL && strcasecmp(np->name, name) == 0
		    && of_node_get(np))
			break;
	if (from)
		of_node_put(from);
	read_unlock(&devtree_lock);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_name);

/**
 *	of_find_node_by_type - Find a node by its "device_type" property
 *	@from:	The node to start searching from or NULL, the node
 *		you pass will not be searched, only the next one
 *		will; typically, you pass what the previous call
 *		returned. of_node_put() will be called on it
 *	@type:	The type string to match against
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */
struct device_node *of_find_node_by_type(struct device_node *from,
	const char *type)
{
	struct device_node *np;

	read_lock(&devtree_lock);
	np = from ? from->allnext : allnodes;
	for (; np != 0; np = np->allnext)
		if (np->type != 0 && strcasecmp(np->type, type) == 0
		    && of_node_get(np))
			break;
	if (from)
		of_node_put(from);
	read_unlock(&devtree_lock);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_type);

/**
 *	of_find_compatible_node - Find a node based on type and one of the
 *				  tokens in its "compatible" property
 *	@from:		The node to start searching from or NULL, the node
 *			you pass will not be searched, only the next one
 *			will; typically, you pass what the previous call
 *			returned. of_node_put() will be called on it
 *	@type:		The type string to match "device_type" or NULL to ignore
 *	@compatible:	The string to match to one of the tokens in the device
 *			"compatible" list.
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */
struct device_node *of_find_compatible_node(struct device_node *from,
	const char *type, const char *compatible)
{
	struct device_node *np;

	read_lock(&devtree_lock);
	np = from ? from->allnext : allnodes;
	for (; np != 0; np = np->allnext) {
		if (type != NULL
		    && !(np->type != 0 && strcasecmp(np->type, type) == 0))
			continue;
		if (device_is_compatible(np, compatible) && of_node_get(np))
			break;
	}
	if (from)
		of_node_put(from);
	read_unlock(&devtree_lock);
	return np;
}
EXPORT_SYMBOL(of_find_compatible_node);
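/*
 * Illustrative sketch only (kept out of the build): iterating with the
 * refcounted of_find_*() API.  Passing the previous node back in drops
 * its reference, so an explicit of_node_put() is only needed if the loop
 * is exited while still holding a node.  The device_type value here is
 * just an example.
 */
#if 0
static void example_scan_pci_nodes(void)
{
	struct device_node *np = NULL;

	while ((np = of_find_node_by_type(np, "pci")) != NULL) {
		printk("pci node: %s\n", np->full_name);
		/* of_node_put(np) would be required if we broke out here */
	}
}
#endif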
/**
 *	of_find_node_by_path - Find a node matching a full OF path
 *	@path:	The full path to match
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */
struct device_node *of_find_node_by_path(const char *path)
{
	struct device_node *np = allnodes;

	read_lock(&devtree_lock);
	for (; np != 0; np = np->allnext) {
		if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0
		    && of_node_get(np))
			break;
	}
	read_unlock(&devtree_lock);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_path);

/**
 *	of_find_node_by_phandle - Find a node given a phandle
 *	@handle:	phandle of the node to find
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */
struct device_node *of_find_node_by_phandle(phandle handle)
{
	struct device_node *np;

	read_lock(&devtree_lock);
	for (np = allnodes; np != 0; np = np->allnext)
		if (np->linux_phandle == handle)
			break;
	if (np)
		of_node_get(np);
	read_unlock(&devtree_lock);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);

/**
 *	of_find_all_nodes - Get next node in global list
 *	@prev:	Previous node or NULL to start iteration
 *		of_node_put() will be called on it
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */
struct device_node *of_find_all_nodes(struct device_node *prev)
{
	struct device_node *np;

	read_lock(&devtree_lock);
	np = prev ? prev->allnext : allnodes;
	for (; np != 0; np = np->allnext)
		if (of_node_get(np))
			break;
	if (prev)
		of_node_put(prev);
	read_unlock(&devtree_lock);
	return np;
}
EXPORT_SYMBOL(of_find_all_nodes);

/**
 *	of_get_parent - Get a node's parent if any
 *	@node:	Node to get parent
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */
struct device_node *of_get_parent(const struct device_node *node)
{
	struct device_node *np;

	if (!node)
		return NULL;

	read_lock(&devtree_lock);
	np = of_node_get(node->parent);
	read_unlock(&devtree_lock);
	return np;
}
EXPORT_SYMBOL(of_get_parent);

/**
 *	of_get_next_child - Iterate over a node's children
 *	@node:	parent node
 *	@prev:	previous child of the parent node, or NULL to get first
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */
struct device_node *of_get_next_child(const struct device_node *node,
	struct device_node *prev)
{
	struct device_node *next;

	read_lock(&devtree_lock);
	next = prev ? prev->sibling : node->child;
	for (; next != 0; next = next->sibling)
		if (of_node_get(next))
			break;
	if (prev)
		of_node_put(prev);
	read_unlock(&devtree_lock);
	return next;
}
EXPORT_SYMBOL(of_get_next_child);
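/*
 * Illustrative sketch only (kept out of the build): walking the children
 * of a node with of_get_next_child(), which handles the refcounting of
 * the previous child for us.
 */
#if 0
static void example_walk_children(struct device_node *parent)
{
	struct device_node *child = NULL;

	while ((child = of_get_next_child(parent, child)) != NULL)
		printk("child: %s\n", child->full_name);
}
#endif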
/**
 *	of_node_get - Increment refcount of a node
 *	@node:	Node to inc refcount, NULL is supported to
 *		simplify writing of callers
 *
 *	Returns node.
 */
struct device_node *of_node_get(struct device_node *node)
{
	if (node)
		kref_get(&node->kref);
	return node;
}
EXPORT_SYMBOL(of_node_get);

static inline struct device_node * kref_to_device_node(struct kref *kref)
{
	return container_of(kref, struct device_node, kref);
}

/**
 *	of_node_release - release a dynamically allocated node
 *	@kref:  kref element of the node to be released
 *
 *	In of_node_put() this function is passed to kref_put()
 *	as the destructor.
 */
static void of_node_release(struct kref *kref)
{
	struct device_node *node = kref_to_device_node(kref);
	struct property *prop = node->properties;

	if (!OF_IS_DYNAMIC(node))
		return;
	while (prop) {
		struct property *next = prop->next;
		kfree(prop->name);
		kfree(prop->value);
		kfree(prop);
		prop = next;

		if (!prop) {
			prop = node->deadprops;
			node->deadprops = NULL;
		}
	}
	kfree(node->intrs);
	kfree(node->full_name);
	kfree(node->data);
	kfree(node);
}

/**
 *	of_node_put - Decrement refcount of a node
 *	@node:	Node to dec refcount, NULL is supported to
 *		simplify writing of callers
 *
 */
void of_node_put(struct device_node *node)
{
	if (node)
		kref_put(&node->kref, of_node_release);
}
EXPORT_SYMBOL(of_node_put);

/*
 * Plug a device node into the tree and global list.
 */
void of_attach_node(struct device_node *np)
{
	write_lock(&devtree_lock);
	np->sibling = np->parent->child;
	np->allnext = allnodes;
	np->parent->child = np;
	allnodes = np;
	write_unlock(&devtree_lock);
}

/*
 * "Unplug" a node from the device tree.  The caller must hold
 * a reference to the node.  The memory associated with the node
 * is not freed until its refcount goes to zero.
 */
void of_detach_node(const struct device_node *np)
{
	struct device_node *parent;

	write_lock(&devtree_lock);

	parent = np->parent;

	if (allnodes == np)
		allnodes = np->allnext;
	else {
		struct device_node *prev;
		for (prev = allnodes;
		     prev->allnext != np;
		     prev = prev->allnext)
			;
		prev->allnext = np->allnext;
	}

	if (parent->child == np)
		parent->child = np->sibling;
	else {
		struct device_node *prevsib;
		for (prevsib = np->parent->child;
		     prevsib->sibling != np;
		     prevsib = prevsib->sibling)
			;
		prevsib->sibling = np->sibling;
	}

	write_unlock(&devtree_lock);
}

#ifdef CONFIG_PPC_PSERIES
/*
 * Fix up the uninitialized fields in a new device node:
 * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields
 *
 * A lot of boot-time code is duplicated here, because functions such
 * as finish_node_interrupts, interpret_pci_props, etc. cannot use the
 * slab allocator.
 *
 * This should probably be split up into smaller chunks.
 */

static int of_finish_dynamic_node(struct device_node *node)
{
	struct device_node *parent = of_get_parent(node);
	int err = 0;
	phandle *ibm_phandle;

	node->name = get_property(node, "name", NULL);
	node->type = get_property(node, "device_type", NULL);

	if (!parent) {
		err = -ENODEV;
		goto out;
	}

	/* We don't support that function on PowerMac, at least
	 * not yet
	 */
	if (_machine == PLATFORM_POWERMAC)
		return -ENODEV;

	/* fix up new node's linux_phandle field */
	if ((ibm_phandle = (unsigned int *)get_property(node,
							"ibm,phandle", NULL)))
		node->linux_phandle = *ibm_phandle;

out:
	of_node_put(parent);
	return err;
}

static int prom_reconfig_notifier(struct notifier_block *nb,
				  unsigned long action, void *node)
{
	int err;

	switch (action) {
	case PSERIES_RECONFIG_ADD:
		err = of_finish_dynamic_node(node);
		if (!err)
			finish_node(node, NULL, 0);
		if (err < 0) {
			printk(KERN_ERR "finish_node returned %d\n", err);
			err = NOTIFY_BAD;
		}
		break;
	default:
		err = NOTIFY_DONE;
		break;
	}
	return err;
}

static struct notifier_block prom_reconfig_nb = {
	.notifier_call = prom_reconfig_notifier,
	.priority = 10, /* This one needs to run first */
};

static int __init prom_reconfig_setup(void)
{
	return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
}
__initcall(prom_reconfig_setup);
#endif

struct property *of_find_property(struct device_node *np, const char *name,
				  int *lenp)
{
	struct property *pp;

	read_lock(&devtree_lock);
	for (pp = np->properties; pp != 0; pp = pp->next)
		if (strcmp(pp->name, name) == 0) {
			if (lenp != 0)
				*lenp = pp->length;
			break;
		}
	read_unlock(&devtree_lock);

	return pp;
}

/*
 * Find a property with a given name for a given node
 * and return the value.
 */
unsigned char *get_property(struct device_node *np, const char *name,
			    int *lenp)
{
	struct property *pp = of_find_property(np,name,lenp);
	return pp ? pp->value : NULL;
}
EXPORT_SYMBOL(get_property);

/*
 * Add a property to a node
 */
int prom_add_property(struct device_node* np, struct property* prop)
{
	struct property **next;

	prop->next = NULL;
	write_lock(&devtree_lock);
	next = &np->properties;
	while (*next) {
		if (strcmp(prop->name, (*next)->name) == 0) {
			/* duplicate ! don't insert it */
			write_unlock(&devtree_lock);
			return -1;
		}
		next = &(*next)->next;
	}
	*next = prop;
	write_unlock(&devtree_lock);

#ifdef CONFIG_PROC_DEVICETREE
	/* try to add to proc as well if it was initialized */
	if (np->pde)
		proc_device_tree_add_prop(np->pde, prop);
#endif /* CONFIG_PROC_DEVICETREE */

	return 0;
}
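/*
 * Illustrative sketch only (kept out of the build): building and
 * attaching a new property with prom_add_property().  The name and value
 * are kmalloc'ed because of_node_release() kfree()s them when a dynamic
 * node is finally released.  The property name and contents used here
 * are made up.
 */
#if 0
static int example_add_marker_property(struct device_node *np)
{
	struct property *prop;
	static const char value[] = "example";

	prop = kmalloc(sizeof(*prop), GFP_KERNEL);
	if (!prop)
		return -ENOMEM;
	prop->name = kstrdup("linux,example-marker", GFP_KERNEL);
	prop->value = kmalloc(sizeof(value), GFP_KERNEL);
	prop->length = sizeof(value);
	if (!prop->name || !prop->value) {
		kfree(prop->name);
		kfree(prop->value);
		kfree(prop);
		return -ENOMEM;
	}
	memcpy(prop->value, value, sizeof(value));
	return prom_add_property(np, prop);
}
#endif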
/*
 * Remove a property from a node.  Note that we don't actually
 * remove it, since we have given out who-knows-how-many pointers
 * to the data using get-property.  Instead we just move the property
 * to the "dead properties" list, so it won't be found any more.
 */
int prom_remove_property(struct device_node *np, struct property *prop)
{
	struct property **next;
	int found = 0;

	write_lock(&devtree_lock);
	next = &np->properties;
	while (*next) {
		if (*next == prop) {
			/* found the node */
			*next = prop->next;
			prop->next = np->deadprops;
			np->deadprops = prop;
			found = 1;
			break;
		}
		next = &(*next)->next;
	}
	write_unlock(&devtree_lock);

	if (!found)
		return -ENODEV;

#ifdef CONFIG_PROC_DEVICETREE
	/* try to remove the proc node as well */
	if (np->pde)
		proc_device_tree_remove_prop(np->pde, prop);
#endif /* CONFIG_PROC_DEVICETREE */

	return 0;
}

/*
 * Update a property in a node.  Note that we don't actually
 * remove it, since we have given out who-knows-how-many pointers
 * to the data using get-property.  Instead we just move the property
 * to the "dead properties" list, and add the new property to the
 * property list
 */
int prom_update_property(struct device_node *np,
			 struct property *newprop,
			 struct property *oldprop)
{
	struct property **next;
	int found = 0;

	write_lock(&devtree_lock);
	next = &np->properties;
	while (*next) {
		if (*next == oldprop) {
			/* found the node */
			newprop->next = oldprop->next;
			*next = newprop;
			oldprop->next = np->deadprops;
			np->deadprops = oldprop;
			found = 1;
			break;
		}
		next = &(*next)->next;
	}
	write_unlock(&devtree_lock);

	if (!found)
		return -ENODEV;

#ifdef CONFIG_PROC_DEVICETREE
	/* try to add to proc as well if it was initialized */
	if (np->pde)
		proc_device_tree_update_prop(np->pde, newprop, oldprop);
#endif /* CONFIG_PROC_DEVICETREE */

	return 0;
}

#ifdef CONFIG_KEXEC
/* We may have allocated the flat device tree inside the crash kernel region
 * in prom_init. If so we need to move it out into regular memory. */
void kdump_move_device_tree(void)
{
	unsigned long start, end;
	struct boot_param_header *new;

	start = __pa((unsigned long)initial_boot_params);
	end = start + initial_boot_params->totalsize;

	if (end < crashk_res.start || start > crashk_res.end)
		return;

	new = (struct boot_param_header*)
		__va(lmb_alloc(initial_boot_params->totalsize, PAGE_SIZE));

	memcpy(new, initial_boot_params, initial_boot_params->totalsize);

	initial_boot_params = new;

	DBG("Flat device tree blob moved to %p\n", initial_boot_params);

	/* XXX should we unreserve the old DT? */
}
#endif /* CONFIG_KEXEC */