1 /* 2 * Procedures for creating, accessing and interpreting the device tree. 3 * 4 * Paul Mackerras August 1996. 5 * Copyright (C) 1996-2005 Paul Mackerras. 6 * 7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner. 8 * {engebret|bergner}@us.ibm.com 9 * 10 * This program is free software; you can redistribute it and/or 11 * modify it under the terms of the GNU General Public License 12 * as published by the Free Software Foundation; either version 13 * 2 of the License, or (at your option) any later version. 14 */ 15 16 #undef DEBUG 17 18 #include <stdarg.h> 19 #include <linux/config.h> 20 #include <linux/kernel.h> 21 #include <linux/string.h> 22 #include <linux/init.h> 23 #include <linux/threads.h> 24 #include <linux/spinlock.h> 25 #include <linux/types.h> 26 #include <linux/pci.h> 27 #include <linux/stringify.h> 28 #include <linux/delay.h> 29 #include <linux/initrd.h> 30 #include <linux/bitops.h> 31 #include <linux/module.h> 32 #include <linux/kexec.h> 33 34 #include <asm/prom.h> 35 #include <asm/rtas.h> 36 #include <asm/lmb.h> 37 #include <asm/page.h> 38 #include <asm/processor.h> 39 #include <asm/irq.h> 40 #include <asm/io.h> 41 #include <asm/kdump.h> 42 #include <asm/smp.h> 43 #include <asm/system.h> 44 #include <asm/mmu.h> 45 #include <asm/pgtable.h> 46 #include <asm/pci.h> 47 #include <asm/iommu.h> 48 #include <asm/btext.h> 49 #include <asm/sections.h> 50 #include <asm/machdep.h> 51 #include <asm/pSeries_reconfig.h> 52 #include <asm/pci-bridge.h> 53 54 #ifdef DEBUG 55 #define DBG(fmt...) printk(KERN_ERR fmt) 56 #else 57 #define DBG(fmt...) 58 #endif 59 60 61 static int __initdata dt_root_addr_cells; 62 static int __initdata dt_root_size_cells; 63 64 #ifdef CONFIG_PPC64 65 int __initdata iommu_is_off; 66 int __initdata iommu_force_on; 67 unsigned long tce_alloc_start, tce_alloc_end; 68 #endif 69 70 typedef u32 cell_t; 71 72 #if 0 73 static struct boot_param_header *initial_boot_params __initdata; 74 #else 75 struct boot_param_header *initial_boot_params; 76 #endif 77 78 static struct device_node *allnodes = NULL; 79 80 /* use when traversing tree through the allnext, child, sibling, 81 * or parent members of struct device_node. 82 */ 83 static DEFINE_RWLOCK(devtree_lock); 84 85 /* export that to outside world */ 86 struct device_node *of_chosen; 87 88 struct device_node *dflt_interrupt_controller; 89 int num_interrupt_controllers; 90 91 /* 92 * Wrapper for allocating memory for various data that needs to be 93 * attached to device nodes as they are processed at boot or when 94 * added to the device tree later (e.g. DLPAR). At boot there is 95 * already a region reserved so we just increment *mem_start by size; 96 * otherwise we call kmalloc. 97 */ 98 static void * prom_alloc(unsigned long size, unsigned long *mem_start) 99 { 100 unsigned long tmp; 101 102 if (!mem_start) 103 return kmalloc(size, GFP_KERNEL); 104 105 tmp = *mem_start; 106 *mem_start += size; 107 return (void *)tmp; 108 } 109 110 /* 111 * Find the device_node with a given phandle. 112 */ 113 static struct device_node * find_phandle(phandle ph) 114 { 115 struct device_node *np; 116 117 for (np = allnodes; np != 0; np = np->allnext) 118 if (np->linux_phandle == ph) 119 return np; 120 return NULL; 121 } 122 123 /* 124 * Find the interrupt parent of a node. 125 */ 126 static struct device_node * __devinit intr_parent(struct device_node *p) 127 { 128 phandle *parp; 129 130 parp = (phandle *) get_property(p, "interrupt-parent", NULL); 131 if (parp == NULL) 132 return p->parent; 133 p = find_phandle(*parp); 134 if (p != NULL) 135 return p; 136 /* 137 * On a powermac booted with BootX, we don't get to know the 138 * phandles for any nodes, so find_phandle will return NULL. 139 * Fortunately these machines only have one interrupt controller 140 * so there isn't in fact any ambiguity. -- paulus 141 */ 142 if (num_interrupt_controllers == 1) 143 p = dflt_interrupt_controller; 144 return p; 145 } 146 147 /* 148 * Find out the size of each entry of the interrupts property 149 * for a node. 150 */ 151 int __devinit prom_n_intr_cells(struct device_node *np) 152 { 153 struct device_node *p; 154 unsigned int *icp; 155 156 for (p = np; (p = intr_parent(p)) != NULL; ) { 157 icp = (unsigned int *) 158 get_property(p, "#interrupt-cells", NULL); 159 if (icp != NULL) 160 return *icp; 161 if (get_property(p, "interrupt-controller", NULL) != NULL 162 || get_property(p, "interrupt-map", NULL) != NULL) { 163 printk("oops, node %s doesn't have #interrupt-cells\n", 164 p->full_name); 165 return 1; 166 } 167 } 168 #ifdef DEBUG_IRQ 169 printk("prom_n_intr_cells failed for %s\n", np->full_name); 170 #endif 171 return 1; 172 } 173 174 /* 175 * Map an interrupt from a device up to the platform interrupt 176 * descriptor. 177 */ 178 static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler, 179 struct device_node *np, unsigned int *ints, 180 int nintrc) 181 { 182 struct device_node *p, *ipar; 183 unsigned int *imap, *imask, *ip; 184 int i, imaplen, match; 185 int newintrc = 0, newaddrc = 0; 186 unsigned int *reg; 187 int naddrc; 188 189 reg = (unsigned int *) get_property(np, "reg", NULL); 190 naddrc = prom_n_addr_cells(np); 191 p = intr_parent(np); 192 while (p != NULL) { 193 if (get_property(p, "interrupt-controller", NULL) != NULL) 194 /* this node is an interrupt controller, stop here */ 195 break; 196 imap = (unsigned int *) 197 get_property(p, "interrupt-map", &imaplen); 198 if (imap == NULL) { 199 p = intr_parent(p); 200 continue; 201 } 202 imask = (unsigned int *) 203 get_property(p, "interrupt-map-mask", NULL); 204 if (imask == NULL) { 205 printk("oops, %s has interrupt-map but no mask\n", 206 p->full_name); 207 return 0; 208 } 209 imaplen /= sizeof(unsigned int); 210 match = 0; 211 ipar = NULL; 212 while (imaplen > 0 && !match) { 213 /* check the child-interrupt field */ 214 match = 1; 215 for (i = 0; i < naddrc && match; ++i) 216 match = ((reg[i] ^ imap[i]) & imask[i]) == 0; 217 for (; i < naddrc + nintrc && match; ++i) 218 match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0; 219 imap += naddrc + nintrc; 220 imaplen -= naddrc + nintrc; 221 /* grab the interrupt parent */ 222 ipar = find_phandle((phandle) *imap++); 223 --imaplen; 224 if (ipar == NULL && num_interrupt_controllers == 1) 225 /* cope with BootX not giving us phandles */ 226 ipar = dflt_interrupt_controller; 227 if (ipar == NULL) { 228 printk("oops, no int parent %x in map of %s\n", 229 imap[-1], p->full_name); 230 return 0; 231 } 232 /* find the parent's # addr and intr cells */ 233 ip = (unsigned int *) 234 get_property(ipar, "#interrupt-cells", NULL); 235 if (ip == NULL) { 236 printk("oops, no #interrupt-cells on %s\n", 237 ipar->full_name); 238 return 0; 239 } 240 newintrc = *ip; 241 ip = (unsigned int *) 242 get_property(ipar, "#address-cells", NULL); 243 newaddrc = (ip == NULL)? 0: *ip; 244 imap += newaddrc + newintrc; 245 imaplen -= newaddrc + newintrc; 246 } 247 if (imaplen < 0) { 248 printk("oops, error decoding int-map on %s, len=%d\n", 249 p->full_name, imaplen); 250 return 0; 251 } 252 if (!match) { 253 #ifdef DEBUG_IRQ 254 printk("oops, no match in %s int-map for %s\n", 255 p->full_name, np->full_name); 256 #endif 257 return 0; 258 } 259 p = ipar; 260 naddrc = newaddrc; 261 nintrc = newintrc; 262 ints = imap - nintrc; 263 reg = ints - naddrc; 264 } 265 if (p == NULL) { 266 #ifdef DEBUG_IRQ 267 printk("hmmm, int tree for %s doesn't have ctrler\n", 268 np->full_name); 269 #endif 270 return 0; 271 } 272 *irq = ints; 273 *ictrler = p; 274 return nintrc; 275 } 276 277 static unsigned char map_isa_senses[4] = { 278 IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE, 279 IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE, 280 IRQ_SENSE_EDGE | IRQ_POLARITY_NEGATIVE, 281 IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE 282 }; 283 284 static unsigned char map_mpic_senses[4] = { 285 IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE, 286 IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE, 287 /* 2 seems to be used for the 8259 cascade... */ 288 IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE, 289 IRQ_SENSE_EDGE | IRQ_POLARITY_NEGATIVE, 290 }; 291 292 static int __devinit finish_node_interrupts(struct device_node *np, 293 unsigned long *mem_start, 294 int measure_only) 295 { 296 unsigned int *ints; 297 int intlen, intrcells, intrcount; 298 int i, j, n, sense; 299 unsigned int *irq, virq; 300 struct device_node *ic; 301 int trace = 0; 302 303 //#define TRACE(fmt...) do { if (trace) { printk(fmt); mdelay(1000); } } while(0) 304 #define TRACE(fmt...) 305 306 if (!strcmp(np->name, "smu-doorbell")) 307 trace = 1; 308 309 TRACE("Finishing SMU doorbell ! num_interrupt_controllers = %d\n", 310 num_interrupt_controllers); 311 312 if (num_interrupt_controllers == 0) { 313 /* 314 * Old machines just have a list of interrupt numbers 315 * and no interrupt-controller nodes. 316 */ 317 ints = (unsigned int *) get_property(np, "AAPL,interrupts", 318 &intlen); 319 /* XXX old interpret_pci_props looked in parent too */ 320 /* XXX old interpret_macio_props looked for interrupts 321 before AAPL,interrupts */ 322 if (ints == NULL) 323 ints = (unsigned int *) get_property(np, "interrupts", 324 &intlen); 325 if (ints == NULL) 326 return 0; 327 328 np->n_intrs = intlen / sizeof(unsigned int); 329 np->intrs = prom_alloc(np->n_intrs * sizeof(np->intrs[0]), 330 mem_start); 331 if (!np->intrs) 332 return -ENOMEM; 333 if (measure_only) 334 return 0; 335 336 for (i = 0; i < np->n_intrs; ++i) { 337 np->intrs[i].line = *ints++; 338 np->intrs[i].sense = IRQ_SENSE_LEVEL 339 | IRQ_POLARITY_NEGATIVE; 340 } 341 return 0; 342 } 343 344 ints = (unsigned int *) get_property(np, "interrupts", &intlen); 345 TRACE("ints=%p, intlen=%d\n", ints, intlen); 346 if (ints == NULL) 347 return 0; 348 intrcells = prom_n_intr_cells(np); 349 intlen /= intrcells * sizeof(unsigned int); 350 TRACE("intrcells=%d, new intlen=%d\n", intrcells, intlen); 351 np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start); 352 if (!np->intrs) 353 return -ENOMEM; 354 355 if (measure_only) 356 return 0; 357 358 intrcount = 0; 359 for (i = 0; i < intlen; ++i, ints += intrcells) { 360 n = map_interrupt(&irq, &ic, np, ints, intrcells); 361 TRACE("map, irq=%d, ic=%p, n=%d\n", irq, ic, n); 362 if (n <= 0) 363 continue; 364 365 /* don't map IRQ numbers under a cascaded 8259 controller */ 366 if (ic && device_is_compatible(ic, "chrp,iic")) { 367 np->intrs[intrcount].line = irq[0]; 368 sense = (n > 1)? (irq[1] & 3): 3; 369 np->intrs[intrcount].sense = map_isa_senses[sense]; 370 } else { 371 virq = virt_irq_create_mapping(irq[0]); 372 TRACE("virq=%d\n", virq); 373 #ifdef CONFIG_PPC64 374 if (virq == NO_IRQ) { 375 printk(KERN_CRIT "Could not allocate interrupt" 376 " number for %s\n", np->full_name); 377 continue; 378 } 379 #endif 380 np->intrs[intrcount].line = irq_offset_up(virq); 381 sense = (n > 1)? (irq[1] & 3): 1; 382 383 /* Apple uses bits in there in a different way, let's 384 * only keep the real sense bit on macs 385 */ 386 if (machine_is(powermac)) 387 sense &= 0x1; 388 np->intrs[intrcount].sense = map_mpic_senses[sense]; 389 } 390 391 #ifdef CONFIG_PPC64 392 /* We offset irq numbers for the u3 MPIC by 128 in PowerMac */ 393 if (machine_is(powermac) && ic && ic->parent) { 394 char *name = get_property(ic->parent, "name", NULL); 395 if (name && !strcmp(name, "u3")) 396 np->intrs[intrcount].line += 128; 397 else if (!(name && (!strcmp(name, "mac-io") || 398 !strcmp(name, "u4")))) 399 /* ignore other cascaded controllers, such as 400 the k2-sata-root */ 401 break; 402 } 403 #endif /* CONFIG_PPC64 */ 404 if (n > 2) { 405 printk("hmmm, got %d intr cells for %s:", n, 406 np->full_name); 407 for (j = 0; j < n; ++j) 408 printk(" %d", irq[j]); 409 printk("\n"); 410 } 411 ++intrcount; 412 } 413 np->n_intrs = intrcount; 414 415 return 0; 416 } 417 418 static int __devinit finish_node(struct device_node *np, 419 unsigned long *mem_start, 420 int measure_only) 421 { 422 struct device_node *child; 423 int rc = 0; 424 425 rc = finish_node_interrupts(np, mem_start, measure_only); 426 if (rc) 427 goto out; 428 429 for (child = np->child; child != NULL; child = child->sibling) { 430 rc = finish_node(child, mem_start, measure_only); 431 if (rc) 432 goto out; 433 } 434 out: 435 return rc; 436 } 437 438 static void __init scan_interrupt_controllers(void) 439 { 440 struct device_node *np; 441 int n = 0; 442 char *name, *ic; 443 int iclen; 444 445 for (np = allnodes; np != NULL; np = np->allnext) { 446 ic = get_property(np, "interrupt-controller", &iclen); 447 name = get_property(np, "name", NULL); 448 /* checking iclen makes sure we don't get a false 449 match on /chosen.interrupt_controller */ 450 if ((name != NULL 451 && strcmp(name, "interrupt-controller") == 0) 452 || (ic != NULL && iclen == 0 453 && strcmp(name, "AppleKiwi"))) { 454 if (n == 0) 455 dflt_interrupt_controller = np; 456 ++n; 457 } 458 } 459 num_interrupt_controllers = n; 460 } 461 462 /** 463 * finish_device_tree is called once things are running normally 464 * (i.e. with text and data mapped to the address they were linked at). 465 * It traverses the device tree and fills in some of the additional, 466 * fields in each node like {n_}addrs and {n_}intrs, the virt interrupt 467 * mapping is also initialized at this point. 468 */ 469 void __init finish_device_tree(void) 470 { 471 unsigned long start, end, size = 0; 472 473 DBG(" -> finish_device_tree\n"); 474 475 #ifdef CONFIG_PPC64 476 /* Initialize virtual IRQ map */ 477 virt_irq_init(); 478 #endif 479 scan_interrupt_controllers(); 480 481 /* 482 * Finish device-tree (pre-parsing some properties etc...) 483 * We do this in 2 passes. One with "measure_only" set, which 484 * will only measure the amount of memory needed, then we can 485 * allocate that memory, and call finish_node again. However, 486 * we must be careful as most routines will fail nowadays when 487 * prom_alloc() returns 0, so we must make sure our first pass 488 * doesn't start at 0. We pre-initialize size to 16 for that 489 * reason and then remove those additional 16 bytes 490 */ 491 size = 16; 492 finish_node(allnodes, &size, 1); 493 size -= 16; 494 495 if (0 == size) 496 end = start = 0; 497 else 498 end = start = (unsigned long)__va(lmb_alloc(size, 128)); 499 500 finish_node(allnodes, &end, 0); 501 BUG_ON(end != start + size); 502 503 DBG(" <- finish_device_tree\n"); 504 } 505 506 static inline char *find_flat_dt_string(u32 offset) 507 { 508 return ((char *)initial_boot_params) + 509 initial_boot_params->off_dt_strings + offset; 510 } 511 512 /** 513 * This function is used to scan the flattened device-tree, it is 514 * used to extract the memory informations at boot before we can 515 * unflatten the tree 516 */ 517 int __init of_scan_flat_dt(int (*it)(unsigned long node, 518 const char *uname, int depth, 519 void *data), 520 void *data) 521 { 522 unsigned long p = ((unsigned long)initial_boot_params) + 523 initial_boot_params->off_dt_struct; 524 int rc = 0; 525 int depth = -1; 526 527 do { 528 u32 tag = *((u32 *)p); 529 char *pathp; 530 531 p += 4; 532 if (tag == OF_DT_END_NODE) { 533 depth --; 534 continue; 535 } 536 if (tag == OF_DT_NOP) 537 continue; 538 if (tag == OF_DT_END) 539 break; 540 if (tag == OF_DT_PROP) { 541 u32 sz = *((u32 *)p); 542 p += 8; 543 if (initial_boot_params->version < 0x10) 544 p = _ALIGN(p, sz >= 8 ? 8 : 4); 545 p += sz; 546 p = _ALIGN(p, 4); 547 continue; 548 } 549 if (tag != OF_DT_BEGIN_NODE) { 550 printk(KERN_WARNING "Invalid tag %x scanning flattened" 551 " device tree !\n", tag); 552 return -EINVAL; 553 } 554 depth++; 555 pathp = (char *)p; 556 p = _ALIGN(p + strlen(pathp) + 1, 4); 557 if ((*pathp) == '/') { 558 char *lp, *np; 559 for (lp = NULL, np = pathp; *np; np++) 560 if ((*np) == '/') 561 lp = np+1; 562 if (lp != NULL) 563 pathp = lp; 564 } 565 rc = it(p, pathp, depth, data); 566 if (rc != 0) 567 break; 568 } while(1); 569 570 return rc; 571 } 572 573 unsigned long __init of_get_flat_dt_root(void) 574 { 575 unsigned long p = ((unsigned long)initial_boot_params) + 576 initial_boot_params->off_dt_struct; 577 578 while(*((u32 *)p) == OF_DT_NOP) 579 p += 4; 580 BUG_ON (*((u32 *)p) != OF_DT_BEGIN_NODE); 581 p += 4; 582 return _ALIGN(p + strlen((char *)p) + 1, 4); 583 } 584 585 /** 586 * This function can be used within scan_flattened_dt callback to get 587 * access to properties 588 */ 589 void* __init of_get_flat_dt_prop(unsigned long node, const char *name, 590 unsigned long *size) 591 { 592 unsigned long p = node; 593 594 do { 595 u32 tag = *((u32 *)p); 596 u32 sz, noff; 597 const char *nstr; 598 599 p += 4; 600 if (tag == OF_DT_NOP) 601 continue; 602 if (tag != OF_DT_PROP) 603 return NULL; 604 605 sz = *((u32 *)p); 606 noff = *((u32 *)(p + 4)); 607 p += 8; 608 if (initial_boot_params->version < 0x10) 609 p = _ALIGN(p, sz >= 8 ? 8 : 4); 610 611 nstr = find_flat_dt_string(noff); 612 if (nstr == NULL) { 613 printk(KERN_WARNING "Can't find property index" 614 " name !\n"); 615 return NULL; 616 } 617 if (strcmp(name, nstr) == 0) { 618 if (size) 619 *size = sz; 620 return (void *)p; 621 } 622 p += sz; 623 p = _ALIGN(p, 4); 624 } while(1); 625 } 626 627 int __init of_flat_dt_is_compatible(unsigned long node, const char *compat) 628 { 629 const char* cp; 630 unsigned long cplen, l; 631 632 cp = of_get_flat_dt_prop(node, "compatible", &cplen); 633 if (cp == NULL) 634 return 0; 635 while (cplen > 0) { 636 if (strncasecmp(cp, compat, strlen(compat)) == 0) 637 return 1; 638 l = strlen(cp) + 1; 639 cp += l; 640 cplen -= l; 641 } 642 643 return 0; 644 } 645 646 static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size, 647 unsigned long align) 648 { 649 void *res; 650 651 *mem = _ALIGN(*mem, align); 652 res = (void *)*mem; 653 *mem += size; 654 655 return res; 656 } 657 658 static unsigned long __init unflatten_dt_node(unsigned long mem, 659 unsigned long *p, 660 struct device_node *dad, 661 struct device_node ***allnextpp, 662 unsigned long fpsize) 663 { 664 struct device_node *np; 665 struct property *pp, **prev_pp = NULL; 666 char *pathp; 667 u32 tag; 668 unsigned int l, allocl; 669 int has_name = 0; 670 int new_format = 0; 671 672 tag = *((u32 *)(*p)); 673 if (tag != OF_DT_BEGIN_NODE) { 674 printk("Weird tag at start of node: %x\n", tag); 675 return mem; 676 } 677 *p += 4; 678 pathp = (char *)*p; 679 l = allocl = strlen(pathp) + 1; 680 *p = _ALIGN(*p + l, 4); 681 682 /* version 0x10 has a more compact unit name here instead of the full 683 * path. we accumulate the full path size using "fpsize", we'll rebuild 684 * it later. We detect this because the first character of the name is 685 * not '/'. 686 */ 687 if ((*pathp) != '/') { 688 new_format = 1; 689 if (fpsize == 0) { 690 /* root node: special case. fpsize accounts for path 691 * plus terminating zero. root node only has '/', so 692 * fpsize should be 2, but we want to avoid the first 693 * level nodes to have two '/' so we use fpsize 1 here 694 */ 695 fpsize = 1; 696 allocl = 2; 697 } else { 698 /* account for '/' and path size minus terminal 0 699 * already in 'l' 700 */ 701 fpsize += l; 702 allocl = fpsize; 703 } 704 } 705 706 707 np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl, 708 __alignof__(struct device_node)); 709 if (allnextpp) { 710 memset(np, 0, sizeof(*np)); 711 np->full_name = ((char*)np) + sizeof(struct device_node); 712 if (new_format) { 713 char *p = np->full_name; 714 /* rebuild full path for new format */ 715 if (dad && dad->parent) { 716 strcpy(p, dad->full_name); 717 #ifdef DEBUG 718 if ((strlen(p) + l + 1) != allocl) { 719 DBG("%s: p: %d, l: %d, a: %d\n", 720 pathp, (int)strlen(p), l, allocl); 721 } 722 #endif 723 p += strlen(p); 724 } 725 *(p++) = '/'; 726 memcpy(p, pathp, l); 727 } else 728 memcpy(np->full_name, pathp, l); 729 prev_pp = &np->properties; 730 **allnextpp = np; 731 *allnextpp = &np->allnext; 732 if (dad != NULL) { 733 np->parent = dad; 734 /* we temporarily use the next field as `last_child'*/ 735 if (dad->next == 0) 736 dad->child = np; 737 else 738 dad->next->sibling = np; 739 dad->next = np; 740 } 741 kref_init(&np->kref); 742 } 743 while(1) { 744 u32 sz, noff; 745 char *pname; 746 747 tag = *((u32 *)(*p)); 748 if (tag == OF_DT_NOP) { 749 *p += 4; 750 continue; 751 } 752 if (tag != OF_DT_PROP) 753 break; 754 *p += 4; 755 sz = *((u32 *)(*p)); 756 noff = *((u32 *)((*p) + 4)); 757 *p += 8; 758 if (initial_boot_params->version < 0x10) 759 *p = _ALIGN(*p, sz >= 8 ? 8 : 4); 760 761 pname = find_flat_dt_string(noff); 762 if (pname == NULL) { 763 printk("Can't find property name in list !\n"); 764 break; 765 } 766 if (strcmp(pname, "name") == 0) 767 has_name = 1; 768 l = strlen(pname) + 1; 769 pp = unflatten_dt_alloc(&mem, sizeof(struct property), 770 __alignof__(struct property)); 771 if (allnextpp) { 772 if (strcmp(pname, "linux,phandle") == 0) { 773 np->node = *((u32 *)*p); 774 if (np->linux_phandle == 0) 775 np->linux_phandle = np->node; 776 } 777 if (strcmp(pname, "ibm,phandle") == 0) 778 np->linux_phandle = *((u32 *)*p); 779 pp->name = pname; 780 pp->length = sz; 781 pp->value = (void *)*p; 782 *prev_pp = pp; 783 prev_pp = &pp->next; 784 } 785 *p = _ALIGN((*p) + sz, 4); 786 } 787 /* with version 0x10 we may not have the name property, recreate 788 * it here from the unit name if absent 789 */ 790 if (!has_name) { 791 char *p = pathp, *ps = pathp, *pa = NULL; 792 int sz; 793 794 while (*p) { 795 if ((*p) == '@') 796 pa = p; 797 if ((*p) == '/') 798 ps = p + 1; 799 p++; 800 } 801 if (pa < ps) 802 pa = p; 803 sz = (pa - ps) + 1; 804 pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz, 805 __alignof__(struct property)); 806 if (allnextpp) { 807 pp->name = "name"; 808 pp->length = sz; 809 pp->value = (unsigned char *)(pp + 1); 810 *prev_pp = pp; 811 prev_pp = &pp->next; 812 memcpy(pp->value, ps, sz - 1); 813 ((char *)pp->value)[sz - 1] = 0; 814 DBG("fixed up name for %s -> %s\n", pathp, pp->value); 815 } 816 } 817 if (allnextpp) { 818 *prev_pp = NULL; 819 np->name = get_property(np, "name", NULL); 820 np->type = get_property(np, "device_type", NULL); 821 822 if (!np->name) 823 np->name = "<NULL>"; 824 if (!np->type) 825 np->type = "<NULL>"; 826 } 827 while (tag == OF_DT_BEGIN_NODE) { 828 mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize); 829 tag = *((u32 *)(*p)); 830 } 831 if (tag != OF_DT_END_NODE) { 832 printk("Weird tag at end of node: %x\n", tag); 833 return mem; 834 } 835 *p += 4; 836 return mem; 837 } 838 839 840 /** 841 * unflattens the device-tree passed by the firmware, creating the 842 * tree of struct device_node. It also fills the "name" and "type" 843 * pointers of the nodes so the normal device-tree walking functions 844 * can be used (this used to be done by finish_device_tree) 845 */ 846 void __init unflatten_device_tree(void) 847 { 848 unsigned long start, mem, size; 849 struct device_node **allnextp = &allnodes; 850 851 DBG(" -> unflatten_device_tree()\n"); 852 853 /* First pass, scan for size */ 854 start = ((unsigned long)initial_boot_params) + 855 initial_boot_params->off_dt_struct; 856 size = unflatten_dt_node(0, &start, NULL, NULL, 0); 857 size = (size | 3) + 1; 858 859 DBG(" size is %lx, allocating...\n", size); 860 861 /* Allocate memory for the expanded device tree */ 862 mem = lmb_alloc(size + 4, __alignof__(struct device_node)); 863 mem = (unsigned long) __va(mem); 864 865 ((u32 *)mem)[size / 4] = 0xdeadbeef; 866 867 DBG(" unflattening %lx...\n", mem); 868 869 /* Second pass, do actual unflattening */ 870 start = ((unsigned long)initial_boot_params) + 871 initial_boot_params->off_dt_struct; 872 unflatten_dt_node(mem, &start, NULL, &allnextp, 0); 873 if (*((u32 *)start) != OF_DT_END) 874 printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start)); 875 if (((u32 *)mem)[size / 4] != 0xdeadbeef) 876 printk(KERN_WARNING "End of tree marker overwritten: %08x\n", 877 ((u32 *)mem)[size / 4] ); 878 *allnextp = NULL; 879 880 /* Get pointer to OF "/chosen" node for use everywhere */ 881 of_chosen = of_find_node_by_path("/chosen"); 882 if (of_chosen == NULL) 883 of_chosen = of_find_node_by_path("/chosen@0"); 884 885 DBG(" <- unflatten_device_tree()\n"); 886 } 887 888 /* 889 * ibm,pa-features is a per-cpu property that contains a string of 890 * attribute descriptors, each of which has a 2 byte header plus up 891 * to 254 bytes worth of processor attribute bits. First header 892 * byte specifies the number of bytes following the header. 893 * Second header byte is an "attribute-specifier" type, of which 894 * zero is the only currently-defined value. 895 * Implementation: Pass in the byte and bit offset for the feature 896 * that we are interested in. The function will return -1 if the 897 * pa-features property is missing, or a 1/0 to indicate if the feature 898 * is supported/not supported. Note that the bit numbers are 899 * big-endian to match the definition in PAPR. 900 */ 901 static struct ibm_pa_feature { 902 unsigned long cpu_features; /* CPU_FTR_xxx bit */ 903 unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */ 904 unsigned char pabyte; /* byte number in ibm,pa-features */ 905 unsigned char pabit; /* bit number (big-endian) */ 906 unsigned char invert; /* if 1, pa bit set => clear feature */ 907 } ibm_pa_features[] __initdata = { 908 {0, PPC_FEATURE_HAS_MMU, 0, 0, 0}, 909 {0, PPC_FEATURE_HAS_FPU, 0, 1, 0}, 910 {CPU_FTR_SLB, 0, 0, 2, 0}, 911 {CPU_FTR_CTRL, 0, 0, 3, 0}, 912 {CPU_FTR_NOEXECUTE, 0, 0, 6, 0}, 913 {CPU_FTR_NODSISRALIGN, 0, 1, 1, 1}, 914 {CPU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0}, 915 }; 916 917 static void __init check_cpu_pa_features(unsigned long node) 918 { 919 unsigned char *pa_ftrs; 920 unsigned long len, tablelen, i, bit; 921 922 pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen); 923 if (pa_ftrs == NULL) 924 return; 925 926 /* find descriptor with type == 0 */ 927 for (;;) { 928 if (tablelen < 3) 929 return; 930 len = 2 + pa_ftrs[0]; 931 if (tablelen < len) 932 return; /* descriptor 0 not found */ 933 if (pa_ftrs[1] == 0) 934 break; 935 tablelen -= len; 936 pa_ftrs += len; 937 } 938 939 /* loop over bits we know about */ 940 for (i = 0; i < ARRAY_SIZE(ibm_pa_features); ++i) { 941 struct ibm_pa_feature *fp = &ibm_pa_features[i]; 942 943 if (fp->pabyte >= pa_ftrs[0]) 944 continue; 945 bit = (pa_ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1; 946 if (bit ^ fp->invert) { 947 cur_cpu_spec->cpu_features |= fp->cpu_features; 948 cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs; 949 } else { 950 cur_cpu_spec->cpu_features &= ~fp->cpu_features; 951 cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs; 952 } 953 } 954 } 955 956 static int __init early_init_dt_scan_cpus(unsigned long node, 957 const char *uname, int depth, 958 void *data) 959 { 960 static int logical_cpuid = 0; 961 char *type = of_get_flat_dt_prop(node, "device_type", NULL); 962 #ifdef CONFIG_ALTIVEC 963 u32 *prop; 964 #endif 965 u32 *intserv; 966 int i, nthreads; 967 unsigned long len; 968 int found = 0; 969 970 /* We are scanning "cpu" nodes only */ 971 if (type == NULL || strcmp(type, "cpu") != 0) 972 return 0; 973 974 /* Get physical cpuid */ 975 intserv = of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", &len); 976 if (intserv) { 977 nthreads = len / sizeof(int); 978 } else { 979 intserv = of_get_flat_dt_prop(node, "reg", NULL); 980 nthreads = 1; 981 } 982 983 /* 984 * Now see if any of these threads match our boot cpu. 985 * NOTE: This must match the parsing done in smp_setup_cpu_maps. 986 */ 987 for (i = 0; i < nthreads; i++) { 988 /* 989 * version 2 of the kexec param format adds the phys cpuid of 990 * booted proc. 991 */ 992 if (initial_boot_params && initial_boot_params->version >= 2) { 993 if (intserv[i] == 994 initial_boot_params->boot_cpuid_phys) { 995 found = 1; 996 break; 997 } 998 } else { 999 /* 1000 * Check if it's the boot-cpu, set it's hw index now, 1001 * unfortunately this format did not support booting 1002 * off secondary threads. 1003 */ 1004 if (of_get_flat_dt_prop(node, 1005 "linux,boot-cpu", NULL) != NULL) { 1006 found = 1; 1007 break; 1008 } 1009 } 1010 1011 #ifdef CONFIG_SMP 1012 /* logical cpu id is always 0 on UP kernels */ 1013 logical_cpuid++; 1014 #endif 1015 } 1016 1017 if (found) { 1018 DBG("boot cpu: logical %d physical %d\n", logical_cpuid, 1019 intserv[i]); 1020 boot_cpuid = logical_cpuid; 1021 set_hard_smp_processor_id(boot_cpuid, intserv[i]); 1022 } 1023 1024 #ifdef CONFIG_ALTIVEC 1025 /* Check if we have a VMX and eventually update CPU features */ 1026 prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL); 1027 if (prop && (*prop) > 0) { 1028 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC; 1029 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC; 1030 } 1031 1032 /* Same goes for Apple's "altivec" property */ 1033 prop = (u32 *)of_get_flat_dt_prop(node, "altivec", NULL); 1034 if (prop) { 1035 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC; 1036 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC; 1037 } 1038 #endif /* CONFIG_ALTIVEC */ 1039 1040 check_cpu_pa_features(node); 1041 1042 #ifdef CONFIG_PPC_PSERIES 1043 if (nthreads > 1) 1044 cur_cpu_spec->cpu_features |= CPU_FTR_SMT; 1045 else 1046 cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT; 1047 #endif 1048 1049 return 0; 1050 } 1051 1052 static int __init early_init_dt_scan_chosen(unsigned long node, 1053 const char *uname, int depth, void *data) 1054 { 1055 unsigned long *lprop; 1056 unsigned long l; 1057 char *p; 1058 1059 DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname); 1060 1061 if (depth != 1 || 1062 (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0)) 1063 return 0; 1064 1065 #ifdef CONFIG_PPC64 1066 /* check if iommu is forced on or off */ 1067 if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL) 1068 iommu_is_off = 1; 1069 if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL) 1070 iommu_force_on = 1; 1071 #endif 1072 1073 lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL); 1074 if (lprop) 1075 memory_limit = *lprop; 1076 1077 #ifdef CONFIG_PPC64 1078 lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL); 1079 if (lprop) 1080 tce_alloc_start = *lprop; 1081 lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL); 1082 if (lprop) 1083 tce_alloc_end = *lprop; 1084 #endif 1085 1086 #ifdef CONFIG_PPC_RTAS 1087 /* To help early debugging via the front panel, we retrieve a minimal 1088 * set of RTAS infos now if available 1089 */ 1090 { 1091 u64 *basep, *entryp, *sizep; 1092 1093 basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL); 1094 entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL); 1095 sizep = of_get_flat_dt_prop(node, "linux,rtas-size", NULL); 1096 if (basep && entryp && sizep) { 1097 rtas.base = *basep; 1098 rtas.entry = *entryp; 1099 rtas.size = *sizep; 1100 } 1101 } 1102 #endif /* CONFIG_PPC_RTAS */ 1103 1104 #ifdef CONFIG_KEXEC 1105 lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL); 1106 if (lprop) 1107 crashk_res.start = *lprop; 1108 1109 lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL); 1110 if (lprop) 1111 crashk_res.end = crashk_res.start + *lprop - 1; 1112 #endif 1113 1114 /* Retreive command line */ 1115 p = of_get_flat_dt_prop(node, "bootargs", &l); 1116 if (p != NULL && l > 0) 1117 strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE)); 1118 1119 #ifdef CONFIG_CMDLINE 1120 if (l == 0 || (l == 1 && (*p) == 0)) 1121 strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); 1122 #endif /* CONFIG_CMDLINE */ 1123 1124 DBG("Command line is: %s\n", cmd_line); 1125 1126 if (strstr(cmd_line, "mem=")) { 1127 char *p, *q; 1128 1129 for (q = cmd_line; (p = strstr(q, "mem=")) != 0; ) { 1130 q = p + 4; 1131 if (p > cmd_line && p[-1] != ' ') 1132 continue; 1133 memory_limit = memparse(q, &q); 1134 } 1135 } 1136 1137 /* break now */ 1138 return 1; 1139 } 1140 1141 static int __init early_init_dt_scan_root(unsigned long node, 1142 const char *uname, int depth, void *data) 1143 { 1144 u32 *prop; 1145 1146 if (depth != 0) 1147 return 0; 1148 1149 prop = of_get_flat_dt_prop(node, "#size-cells", NULL); 1150 dt_root_size_cells = (prop == NULL) ? 1 : *prop; 1151 DBG("dt_root_size_cells = %x\n", dt_root_size_cells); 1152 1153 prop = of_get_flat_dt_prop(node, "#address-cells", NULL); 1154 dt_root_addr_cells = (prop == NULL) ? 2 : *prop; 1155 DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells); 1156 1157 /* break now */ 1158 return 1; 1159 } 1160 1161 static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp) 1162 { 1163 cell_t *p = *cellp; 1164 unsigned long r; 1165 1166 /* Ignore more than 2 cells */ 1167 while (s > sizeof(unsigned long) / 4) { 1168 p++; 1169 s--; 1170 } 1171 r = *p++; 1172 #ifdef CONFIG_PPC64 1173 if (s > 1) { 1174 r <<= 32; 1175 r |= *(p++); 1176 s--; 1177 } 1178 #endif 1179 1180 *cellp = p; 1181 return r; 1182 } 1183 1184 1185 static int __init early_init_dt_scan_memory(unsigned long node, 1186 const char *uname, int depth, void *data) 1187 { 1188 char *type = of_get_flat_dt_prop(node, "device_type", NULL); 1189 cell_t *reg, *endp; 1190 unsigned long l; 1191 1192 /* We are scanning "memory" nodes only */ 1193 if (type == NULL) { 1194 /* 1195 * The longtrail doesn't have a device_type on the 1196 * /memory node, so look for the node called /memory@0. 1197 */ 1198 if (depth != 1 || strcmp(uname, "memory@0") != 0) 1199 return 0; 1200 } else if (strcmp(type, "memory") != 0) 1201 return 0; 1202 1203 reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l); 1204 if (reg == NULL) 1205 reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l); 1206 if (reg == NULL) 1207 return 0; 1208 1209 endp = reg + (l / sizeof(cell_t)); 1210 1211 DBG("memory scan node %s, reg size %ld, data: %x %x %x %x,\n", 1212 uname, l, reg[0], reg[1], reg[2], reg[3]); 1213 1214 while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) { 1215 unsigned long base, size; 1216 1217 base = dt_mem_next_cell(dt_root_addr_cells, ®); 1218 size = dt_mem_next_cell(dt_root_size_cells, ®); 1219 1220 if (size == 0) 1221 continue; 1222 DBG(" - %lx , %lx\n", base, size); 1223 #ifdef CONFIG_PPC64 1224 if (iommu_is_off) { 1225 if (base >= 0x80000000ul) 1226 continue; 1227 if ((base + size) > 0x80000000ul) 1228 size = 0x80000000ul - base; 1229 } 1230 #endif 1231 lmb_add(base, size); 1232 } 1233 return 0; 1234 } 1235 1236 static void __init early_reserve_mem(void) 1237 { 1238 u64 base, size; 1239 u64 *reserve_map; 1240 1241 reserve_map = (u64 *)(((unsigned long)initial_boot_params) + 1242 initial_boot_params->off_mem_rsvmap); 1243 #ifdef CONFIG_PPC32 1244 /* 1245 * Handle the case where we might be booting from an old kexec 1246 * image that setup the mem_rsvmap as pairs of 32-bit values 1247 */ 1248 if (*reserve_map > 0xffffffffull) { 1249 u32 base_32, size_32; 1250 u32 *reserve_map_32 = (u32 *)reserve_map; 1251 1252 while (1) { 1253 base_32 = *(reserve_map_32++); 1254 size_32 = *(reserve_map_32++); 1255 if (size_32 == 0) 1256 break; 1257 DBG("reserving: %x -> %x\n", base_32, size_32); 1258 lmb_reserve(base_32, size_32); 1259 } 1260 return; 1261 } 1262 #endif 1263 while (1) { 1264 base = *(reserve_map++); 1265 size = *(reserve_map++); 1266 if (size == 0) 1267 break; 1268 DBG("reserving: %llx -> %llx\n", base, size); 1269 lmb_reserve(base, size); 1270 } 1271 1272 #if 0 1273 DBG("memory reserved, lmbs :\n"); 1274 lmb_dump_all(); 1275 #endif 1276 } 1277 1278 void __init early_init_devtree(void *params) 1279 { 1280 DBG(" -> early_init_devtree()\n"); 1281 1282 /* Setup flat device-tree pointer */ 1283 initial_boot_params = params; 1284 1285 /* Retrieve various informations from the /chosen node of the 1286 * device-tree, including the platform type, initrd location and 1287 * size, TCE reserve, and more ... 1288 */ 1289 of_scan_flat_dt(early_init_dt_scan_chosen, NULL); 1290 1291 /* Scan memory nodes and rebuild LMBs */ 1292 lmb_init(); 1293 of_scan_flat_dt(early_init_dt_scan_root, NULL); 1294 of_scan_flat_dt(early_init_dt_scan_memory, NULL); 1295 lmb_enforce_memory_limit(memory_limit); 1296 lmb_analyze(); 1297 1298 DBG("Phys. mem: %lx\n", lmb_phys_mem_size()); 1299 1300 /* Reserve LMB regions used by kernel, initrd, dt, etc... */ 1301 lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START); 1302 #ifdef CONFIG_CRASH_DUMP 1303 lmb_reserve(0, KDUMP_RESERVE_LIMIT); 1304 #endif 1305 early_reserve_mem(); 1306 1307 DBG("Scanning CPUs ...\n"); 1308 1309 /* Retreive CPU related informations from the flat tree 1310 * (altivec support, boot CPU ID, ...) 1311 */ 1312 of_scan_flat_dt(early_init_dt_scan_cpus, NULL); 1313 1314 DBG(" <- early_init_devtree()\n"); 1315 } 1316 1317 #undef printk 1318 1319 int 1320 prom_n_addr_cells(struct device_node* np) 1321 { 1322 int* ip; 1323 do { 1324 if (np->parent) 1325 np = np->parent; 1326 ip = (int *) get_property(np, "#address-cells", NULL); 1327 if (ip != NULL) 1328 return *ip; 1329 } while (np->parent); 1330 /* No #address-cells property for the root node, default to 1 */ 1331 return 1; 1332 } 1333 EXPORT_SYMBOL(prom_n_addr_cells); 1334 1335 int 1336 prom_n_size_cells(struct device_node* np) 1337 { 1338 int* ip; 1339 do { 1340 if (np->parent) 1341 np = np->parent; 1342 ip = (int *) get_property(np, "#size-cells", NULL); 1343 if (ip != NULL) 1344 return *ip; 1345 } while (np->parent); 1346 /* No #size-cells property for the root node, default to 1 */ 1347 return 1; 1348 } 1349 EXPORT_SYMBOL(prom_n_size_cells); 1350 1351 /** 1352 * Work out the sense (active-low level / active-high edge) 1353 * of each interrupt from the device tree. 1354 */ 1355 void __init prom_get_irq_senses(unsigned char *senses, int off, int max) 1356 { 1357 struct device_node *np; 1358 int i, j; 1359 1360 /* default to level-triggered */ 1361 memset(senses, IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE, max - off); 1362 1363 for (np = allnodes; np != 0; np = np->allnext) { 1364 for (j = 0; j < np->n_intrs; j++) { 1365 i = np->intrs[j].line; 1366 if (i >= off && i < max) 1367 senses[i-off] = np->intrs[j].sense; 1368 } 1369 } 1370 } 1371 1372 /** 1373 * Construct and return a list of the device_nodes with a given name. 1374 */ 1375 struct device_node *find_devices(const char *name) 1376 { 1377 struct device_node *head, **prevp, *np; 1378 1379 prevp = &head; 1380 for (np = allnodes; np != 0; np = np->allnext) { 1381 if (np->name != 0 && strcasecmp(np->name, name) == 0) { 1382 *prevp = np; 1383 prevp = &np->next; 1384 } 1385 } 1386 *prevp = NULL; 1387 return head; 1388 } 1389 EXPORT_SYMBOL(find_devices); 1390 1391 /** 1392 * Construct and return a list of the device_nodes with a given type. 1393 */ 1394 struct device_node *find_type_devices(const char *type) 1395 { 1396 struct device_node *head, **prevp, *np; 1397 1398 prevp = &head; 1399 for (np = allnodes; np != 0; np = np->allnext) { 1400 if (np->type != 0 && strcasecmp(np->type, type) == 0) { 1401 *prevp = np; 1402 prevp = &np->next; 1403 } 1404 } 1405 *prevp = NULL; 1406 return head; 1407 } 1408 EXPORT_SYMBOL(find_type_devices); 1409 1410 /** 1411 * Returns all nodes linked together 1412 */ 1413 struct device_node *find_all_nodes(void) 1414 { 1415 struct device_node *head, **prevp, *np; 1416 1417 prevp = &head; 1418 for (np = allnodes; np != 0; np = np->allnext) { 1419 *prevp = np; 1420 prevp = &np->next; 1421 } 1422 *prevp = NULL; 1423 return head; 1424 } 1425 EXPORT_SYMBOL(find_all_nodes); 1426 1427 /** Checks if the given "compat" string matches one of the strings in 1428 * the device's "compatible" property 1429 */ 1430 int device_is_compatible(struct device_node *device, const char *compat) 1431 { 1432 const char* cp; 1433 int cplen, l; 1434 1435 cp = (char *) get_property(device, "compatible", &cplen); 1436 if (cp == NULL) 1437 return 0; 1438 while (cplen > 0) { 1439 if (strncasecmp(cp, compat, strlen(compat)) == 0) 1440 return 1; 1441 l = strlen(cp) + 1; 1442 cp += l; 1443 cplen -= l; 1444 } 1445 1446 return 0; 1447 } 1448 EXPORT_SYMBOL(device_is_compatible); 1449 1450 1451 /** 1452 * Indicates whether the root node has a given value in its 1453 * compatible property. 1454 */ 1455 int machine_is_compatible(const char *compat) 1456 { 1457 struct device_node *root; 1458 int rc = 0; 1459 1460 root = of_find_node_by_path("/"); 1461 if (root) { 1462 rc = device_is_compatible(root, compat); 1463 of_node_put(root); 1464 } 1465 return rc; 1466 } 1467 EXPORT_SYMBOL(machine_is_compatible); 1468 1469 /** 1470 * Construct and return a list of the device_nodes with a given type 1471 * and compatible property. 1472 */ 1473 struct device_node *find_compatible_devices(const char *type, 1474 const char *compat) 1475 { 1476 struct device_node *head, **prevp, *np; 1477 1478 prevp = &head; 1479 for (np = allnodes; np != 0; np = np->allnext) { 1480 if (type != NULL 1481 && !(np->type != 0 && strcasecmp(np->type, type) == 0)) 1482 continue; 1483 if (device_is_compatible(np, compat)) { 1484 *prevp = np; 1485 prevp = &np->next; 1486 } 1487 } 1488 *prevp = NULL; 1489 return head; 1490 } 1491 EXPORT_SYMBOL(find_compatible_devices); 1492 1493 /** 1494 * Find the device_node with a given full_name. 1495 */ 1496 struct device_node *find_path_device(const char *path) 1497 { 1498 struct device_node *np; 1499 1500 for (np = allnodes; np != 0; np = np->allnext) 1501 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0) 1502 return np; 1503 return NULL; 1504 } 1505 EXPORT_SYMBOL(find_path_device); 1506 1507 /******* 1508 * 1509 * New implementation of the OF "find" APIs, return a refcounted 1510 * object, call of_node_put() when done. The device tree and list 1511 * are protected by a rw_lock. 1512 * 1513 * Note that property management will need some locking as well, 1514 * this isn't dealt with yet. 1515 * 1516 *******/ 1517 1518 /** 1519 * of_find_node_by_name - Find a node by its "name" property 1520 * @from: The node to start searching from or NULL, the node 1521 * you pass will not be searched, only the next one 1522 * will; typically, you pass what the previous call 1523 * returned. of_node_put() will be called on it 1524 * @name: The name string to match against 1525 * 1526 * Returns a node pointer with refcount incremented, use 1527 * of_node_put() on it when done. 1528 */ 1529 struct device_node *of_find_node_by_name(struct device_node *from, 1530 const char *name) 1531 { 1532 struct device_node *np; 1533 1534 read_lock(&devtree_lock); 1535 np = from ? from->allnext : allnodes; 1536 for (; np != NULL; np = np->allnext) 1537 if (np->name != NULL && strcasecmp(np->name, name) == 0 1538 && of_node_get(np)) 1539 break; 1540 if (from) 1541 of_node_put(from); 1542 read_unlock(&devtree_lock); 1543 return np; 1544 } 1545 EXPORT_SYMBOL(of_find_node_by_name); 1546 1547 /** 1548 * of_find_node_by_type - Find a node by its "device_type" property 1549 * @from: The node to start searching from or NULL, the node 1550 * you pass will not be searched, only the next one 1551 * will; typically, you pass what the previous call 1552 * returned. of_node_put() will be called on it 1553 * @name: The type string to match against 1554 * 1555 * Returns a node pointer with refcount incremented, use 1556 * of_node_put() on it when done. 1557 */ 1558 struct device_node *of_find_node_by_type(struct device_node *from, 1559 const char *type) 1560 { 1561 struct device_node *np; 1562 1563 read_lock(&devtree_lock); 1564 np = from ? from->allnext : allnodes; 1565 for (; np != 0; np = np->allnext) 1566 if (np->type != 0 && strcasecmp(np->type, type) == 0 1567 && of_node_get(np)) 1568 break; 1569 if (from) 1570 of_node_put(from); 1571 read_unlock(&devtree_lock); 1572 return np; 1573 } 1574 EXPORT_SYMBOL(of_find_node_by_type); 1575 1576 /** 1577 * of_find_compatible_node - Find a node based on type and one of the 1578 * tokens in its "compatible" property 1579 * @from: The node to start searching from or NULL, the node 1580 * you pass will not be searched, only the next one 1581 * will; typically, you pass what the previous call 1582 * returned. of_node_put() will be called on it 1583 * @type: The type string to match "device_type" or NULL to ignore 1584 * @compatible: The string to match to one of the tokens in the device 1585 * "compatible" list. 1586 * 1587 * Returns a node pointer with refcount incremented, use 1588 * of_node_put() on it when done. 1589 */ 1590 struct device_node *of_find_compatible_node(struct device_node *from, 1591 const char *type, const char *compatible) 1592 { 1593 struct device_node *np; 1594 1595 read_lock(&devtree_lock); 1596 np = from ? from->allnext : allnodes; 1597 for (; np != 0; np = np->allnext) { 1598 if (type != NULL 1599 && !(np->type != 0 && strcasecmp(np->type, type) == 0)) 1600 continue; 1601 if (device_is_compatible(np, compatible) && of_node_get(np)) 1602 break; 1603 } 1604 if (from) 1605 of_node_put(from); 1606 read_unlock(&devtree_lock); 1607 return np; 1608 } 1609 EXPORT_SYMBOL(of_find_compatible_node); 1610 1611 /** 1612 * of_find_node_by_path - Find a node matching a full OF path 1613 * @path: The full path to match 1614 * 1615 * Returns a node pointer with refcount incremented, use 1616 * of_node_put() on it when done. 1617 */ 1618 struct device_node *of_find_node_by_path(const char *path) 1619 { 1620 struct device_node *np = allnodes; 1621 1622 read_lock(&devtree_lock); 1623 for (; np != 0; np = np->allnext) { 1624 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0 1625 && of_node_get(np)) 1626 break; 1627 } 1628 read_unlock(&devtree_lock); 1629 return np; 1630 } 1631 EXPORT_SYMBOL(of_find_node_by_path); 1632 1633 /** 1634 * of_find_node_by_phandle - Find a node given a phandle 1635 * @handle: phandle of the node to find 1636 * 1637 * Returns a node pointer with refcount incremented, use 1638 * of_node_put() on it when done. 1639 */ 1640 struct device_node *of_find_node_by_phandle(phandle handle) 1641 { 1642 struct device_node *np; 1643 1644 read_lock(&devtree_lock); 1645 for (np = allnodes; np != 0; np = np->allnext) 1646 if (np->linux_phandle == handle) 1647 break; 1648 if (np) 1649 of_node_get(np); 1650 read_unlock(&devtree_lock); 1651 return np; 1652 } 1653 EXPORT_SYMBOL(of_find_node_by_phandle); 1654 1655 /** 1656 * of_find_all_nodes - Get next node in global list 1657 * @prev: Previous node or NULL to start iteration 1658 * of_node_put() will be called on it 1659 * 1660 * Returns a node pointer with refcount incremented, use 1661 * of_node_put() on it when done. 1662 */ 1663 struct device_node *of_find_all_nodes(struct device_node *prev) 1664 { 1665 struct device_node *np; 1666 1667 read_lock(&devtree_lock); 1668 np = prev ? prev->allnext : allnodes; 1669 for (; np != 0; np = np->allnext) 1670 if (of_node_get(np)) 1671 break; 1672 if (prev) 1673 of_node_put(prev); 1674 read_unlock(&devtree_lock); 1675 return np; 1676 } 1677 EXPORT_SYMBOL(of_find_all_nodes); 1678 1679 /** 1680 * of_get_parent - Get a node's parent if any 1681 * @node: Node to get parent 1682 * 1683 * Returns a node pointer with refcount incremented, use 1684 * of_node_put() on it when done. 1685 */ 1686 struct device_node *of_get_parent(const struct device_node *node) 1687 { 1688 struct device_node *np; 1689 1690 if (!node) 1691 return NULL; 1692 1693 read_lock(&devtree_lock); 1694 np = of_node_get(node->parent); 1695 read_unlock(&devtree_lock); 1696 return np; 1697 } 1698 EXPORT_SYMBOL(of_get_parent); 1699 1700 /** 1701 * of_get_next_child - Iterate a node childs 1702 * @node: parent node 1703 * @prev: previous child of the parent node, or NULL to get first 1704 * 1705 * Returns a node pointer with refcount incremented, use 1706 * of_node_put() on it when done. 1707 */ 1708 struct device_node *of_get_next_child(const struct device_node *node, 1709 struct device_node *prev) 1710 { 1711 struct device_node *next; 1712 1713 read_lock(&devtree_lock); 1714 next = prev ? prev->sibling : node->child; 1715 for (; next != 0; next = next->sibling) 1716 if (of_node_get(next)) 1717 break; 1718 if (prev) 1719 of_node_put(prev); 1720 read_unlock(&devtree_lock); 1721 return next; 1722 } 1723 EXPORT_SYMBOL(of_get_next_child); 1724 1725 /** 1726 * of_node_get - Increment refcount of a node 1727 * @node: Node to inc refcount, NULL is supported to 1728 * simplify writing of callers 1729 * 1730 * Returns node. 1731 */ 1732 struct device_node *of_node_get(struct device_node *node) 1733 { 1734 if (node) 1735 kref_get(&node->kref); 1736 return node; 1737 } 1738 EXPORT_SYMBOL(of_node_get); 1739 1740 static inline struct device_node * kref_to_device_node(struct kref *kref) 1741 { 1742 return container_of(kref, struct device_node, kref); 1743 } 1744 1745 /** 1746 * of_node_release - release a dynamically allocated node 1747 * @kref: kref element of the node to be released 1748 * 1749 * In of_node_put() this function is passed to kref_put() 1750 * as the destructor. 1751 */ 1752 static void of_node_release(struct kref *kref) 1753 { 1754 struct device_node *node = kref_to_device_node(kref); 1755 struct property *prop = node->properties; 1756 1757 if (!OF_IS_DYNAMIC(node)) 1758 return; 1759 while (prop) { 1760 struct property *next = prop->next; 1761 kfree(prop->name); 1762 kfree(prop->value); 1763 kfree(prop); 1764 prop = next; 1765 1766 if (!prop) { 1767 prop = node->deadprops; 1768 node->deadprops = NULL; 1769 } 1770 } 1771 kfree(node->intrs); 1772 kfree(node->full_name); 1773 kfree(node->data); 1774 kfree(node); 1775 } 1776 1777 /** 1778 * of_node_put - Decrement refcount of a node 1779 * @node: Node to dec refcount, NULL is supported to 1780 * simplify writing of callers 1781 * 1782 */ 1783 void of_node_put(struct device_node *node) 1784 { 1785 if (node) 1786 kref_put(&node->kref, of_node_release); 1787 } 1788 EXPORT_SYMBOL(of_node_put); 1789 1790 /* 1791 * Plug a device node into the tree and global list. 1792 */ 1793 void of_attach_node(struct device_node *np) 1794 { 1795 write_lock(&devtree_lock); 1796 np->sibling = np->parent->child; 1797 np->allnext = allnodes; 1798 np->parent->child = np; 1799 allnodes = np; 1800 write_unlock(&devtree_lock); 1801 } 1802 1803 /* 1804 * "Unplug" a node from the device tree. The caller must hold 1805 * a reference to the node. The memory associated with the node 1806 * is not freed until its refcount goes to zero. 1807 */ 1808 void of_detach_node(const struct device_node *np) 1809 { 1810 struct device_node *parent; 1811 1812 write_lock(&devtree_lock); 1813 1814 parent = np->parent; 1815 1816 if (allnodes == np) 1817 allnodes = np->allnext; 1818 else { 1819 struct device_node *prev; 1820 for (prev = allnodes; 1821 prev->allnext != np; 1822 prev = prev->allnext) 1823 ; 1824 prev->allnext = np->allnext; 1825 } 1826 1827 if (parent->child == np) 1828 parent->child = np->sibling; 1829 else { 1830 struct device_node *prevsib; 1831 for (prevsib = np->parent->child; 1832 prevsib->sibling != np; 1833 prevsib = prevsib->sibling) 1834 ; 1835 prevsib->sibling = np->sibling; 1836 } 1837 1838 write_unlock(&devtree_lock); 1839 } 1840 1841 #ifdef CONFIG_PPC_PSERIES 1842 /* 1843 * Fix up the uninitialized fields in a new device node: 1844 * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields 1845 * 1846 * A lot of boot-time code is duplicated here, because functions such 1847 * as finish_node_interrupts, interpret_pci_props, etc. cannot use the 1848 * slab allocator. 1849 * 1850 * This should probably be split up into smaller chunks. 1851 */ 1852 1853 static int of_finish_dynamic_node(struct device_node *node) 1854 { 1855 struct device_node *parent = of_get_parent(node); 1856 int err = 0; 1857 phandle *ibm_phandle; 1858 1859 node->name = get_property(node, "name", NULL); 1860 node->type = get_property(node, "device_type", NULL); 1861 1862 if (!parent) { 1863 err = -ENODEV; 1864 goto out; 1865 } 1866 1867 /* We don't support that function on PowerMac, at least 1868 * not yet 1869 */ 1870 if (machine_is(powermac)) 1871 return -ENODEV; 1872 1873 /* fix up new node's linux_phandle field */ 1874 if ((ibm_phandle = (unsigned int *)get_property(node, 1875 "ibm,phandle", NULL))) 1876 node->linux_phandle = *ibm_phandle; 1877 1878 out: 1879 of_node_put(parent); 1880 return err; 1881 } 1882 1883 static int prom_reconfig_notifier(struct notifier_block *nb, 1884 unsigned long action, void *node) 1885 { 1886 int err; 1887 1888 switch (action) { 1889 case PSERIES_RECONFIG_ADD: 1890 err = of_finish_dynamic_node(node); 1891 if (!err) 1892 finish_node(node, NULL, 0); 1893 if (err < 0) { 1894 printk(KERN_ERR "finish_node returned %d\n", err); 1895 err = NOTIFY_BAD; 1896 } 1897 break; 1898 default: 1899 err = NOTIFY_DONE; 1900 break; 1901 } 1902 return err; 1903 } 1904 1905 static struct notifier_block prom_reconfig_nb = { 1906 .notifier_call = prom_reconfig_notifier, 1907 .priority = 10, /* This one needs to run first */ 1908 }; 1909 1910 static int __init prom_reconfig_setup(void) 1911 { 1912 return pSeries_reconfig_notifier_register(&prom_reconfig_nb); 1913 } 1914 __initcall(prom_reconfig_setup); 1915 #endif 1916 1917 struct property *of_find_property(struct device_node *np, const char *name, 1918 int *lenp) 1919 { 1920 struct property *pp; 1921 1922 read_lock(&devtree_lock); 1923 for (pp = np->properties; pp != 0; pp = pp->next) 1924 if (strcmp(pp->name, name) == 0) { 1925 if (lenp != 0) 1926 *lenp = pp->length; 1927 break; 1928 } 1929 read_unlock(&devtree_lock); 1930 1931 return pp; 1932 } 1933 1934 /* 1935 * Find a property with a given name for a given node 1936 * and return the value. 1937 */ 1938 unsigned char *get_property(struct device_node *np, const char *name, 1939 int *lenp) 1940 { 1941 struct property *pp = of_find_property(np,name,lenp); 1942 return pp ? pp->value : NULL; 1943 } 1944 EXPORT_SYMBOL(get_property); 1945 1946 /* 1947 * Add a property to a node 1948 */ 1949 int prom_add_property(struct device_node* np, struct property* prop) 1950 { 1951 struct property **next; 1952 1953 prop->next = NULL; 1954 write_lock(&devtree_lock); 1955 next = &np->properties; 1956 while (*next) { 1957 if (strcmp(prop->name, (*next)->name) == 0) { 1958 /* duplicate ! don't insert it */ 1959 write_unlock(&devtree_lock); 1960 return -1; 1961 } 1962 next = &(*next)->next; 1963 } 1964 *next = prop; 1965 write_unlock(&devtree_lock); 1966 1967 #ifdef CONFIG_PROC_DEVICETREE 1968 /* try to add to proc as well if it was initialized */ 1969 if (np->pde) 1970 proc_device_tree_add_prop(np->pde, prop); 1971 #endif /* CONFIG_PROC_DEVICETREE */ 1972 1973 return 0; 1974 } 1975 1976 /* 1977 * Remove a property from a node. Note that we don't actually 1978 * remove it, since we have given out who-knows-how-many pointers 1979 * to the data using get-property. Instead we just move the property 1980 * to the "dead properties" list, so it won't be found any more. 1981 */ 1982 int prom_remove_property(struct device_node *np, struct property *prop) 1983 { 1984 struct property **next; 1985 int found = 0; 1986 1987 write_lock(&devtree_lock); 1988 next = &np->properties; 1989 while (*next) { 1990 if (*next == prop) { 1991 /* found the node */ 1992 *next = prop->next; 1993 prop->next = np->deadprops; 1994 np->deadprops = prop; 1995 found = 1; 1996 break; 1997 } 1998 next = &(*next)->next; 1999 } 2000 write_unlock(&devtree_lock); 2001 2002 if (!found) 2003 return -ENODEV; 2004 2005 #ifdef CONFIG_PROC_DEVICETREE 2006 /* try to remove the proc node as well */ 2007 if (np->pde) 2008 proc_device_tree_remove_prop(np->pde, prop); 2009 #endif /* CONFIG_PROC_DEVICETREE */ 2010 2011 return 0; 2012 } 2013 2014 /* 2015 * Update a property in a node. Note that we don't actually 2016 * remove it, since we have given out who-knows-how-many pointers 2017 * to the data using get-property. Instead we just move the property 2018 * to the "dead properties" list, and add the new property to the 2019 * property list 2020 */ 2021 int prom_update_property(struct device_node *np, 2022 struct property *newprop, 2023 struct property *oldprop) 2024 { 2025 struct property **next; 2026 int found = 0; 2027 2028 write_lock(&devtree_lock); 2029 next = &np->properties; 2030 while (*next) { 2031 if (*next == oldprop) { 2032 /* found the node */ 2033 newprop->next = oldprop->next; 2034 *next = newprop; 2035 oldprop->next = np->deadprops; 2036 np->deadprops = oldprop; 2037 found = 1; 2038 break; 2039 } 2040 next = &(*next)->next; 2041 } 2042 write_unlock(&devtree_lock); 2043 2044 if (!found) 2045 return -ENODEV; 2046 2047 #ifdef CONFIG_PROC_DEVICETREE 2048 /* try to add to proc as well if it was initialized */ 2049 if (np->pde) 2050 proc_device_tree_update_prop(np->pde, newprop, oldprop); 2051 #endif /* CONFIG_PROC_DEVICETREE */ 2052 2053 return 0; 2054 } 2055 2056 #ifdef CONFIG_KEXEC 2057 /* We may have allocated the flat device tree inside the crash kernel region 2058 * in prom_init. If so we need to move it out into regular memory. */ 2059 void kdump_move_device_tree(void) 2060 { 2061 unsigned long start, end; 2062 struct boot_param_header *new; 2063 2064 start = __pa((unsigned long)initial_boot_params); 2065 end = start + initial_boot_params->totalsize; 2066 2067 if (end < crashk_res.start || start > crashk_res.end) 2068 return; 2069 2070 new = (struct boot_param_header*) 2071 __va(lmb_alloc(initial_boot_params->totalsize, PAGE_SIZE)); 2072 2073 memcpy(new, initial_boot_params, initial_boot_params->totalsize); 2074 2075 initial_boot_params = new; 2076 2077 DBG("Flat device tree blob moved to %p\n", initial_boot_params); 2078 2079 /* XXX should we unreserve the old DT? */ 2080 } 2081 #endif /* CONFIG_KEXEC */ 2082