1 /* 2 * Procedures for creating, accessing and interpreting the device tree. 3 * 4 * Paul Mackerras August 1996. 5 * Copyright (C) 1996-2005 Paul Mackerras. 6 * 7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner. 8 * {engebret|bergner}@us.ibm.com 9 * 10 * This program is free software; you can redistribute it and/or 11 * modify it under the terms of the GNU General Public License 12 * as published by the Free Software Foundation; either version 13 * 2 of the License, or (at your option) any later version. 14 */ 15 16 #undef DEBUG 17 18 #include <stdarg.h> 19 #include <linux/config.h> 20 #include <linux/kernel.h> 21 #include <linux/string.h> 22 #include <linux/init.h> 23 #include <linux/threads.h> 24 #include <linux/spinlock.h> 25 #include <linux/types.h> 26 #include <linux/pci.h> 27 #include <linux/stringify.h> 28 #include <linux/delay.h> 29 #include <linux/initrd.h> 30 #include <linux/bitops.h> 31 #include <linux/module.h> 32 #include <linux/kexec.h> 33 #include <linux/debugfs.h> 34 35 #include <asm/prom.h> 36 #include <asm/rtas.h> 37 #include <asm/lmb.h> 38 #include <asm/page.h> 39 #include <asm/processor.h> 40 #include <asm/irq.h> 41 #include <asm/io.h> 42 #include <asm/kdump.h> 43 #include <asm/smp.h> 44 #include <asm/system.h> 45 #include <asm/mmu.h> 46 #include <asm/pgtable.h> 47 #include <asm/pci.h> 48 #include <asm/iommu.h> 49 #include <asm/btext.h> 50 #include <asm/sections.h> 51 #include <asm/machdep.h> 52 #include <asm/pSeries_reconfig.h> 53 #include <asm/pci-bridge.h> 54 #include <asm/kexec.h> 55 56 #ifdef DEBUG 57 #define DBG(fmt...) printk(KERN_ERR fmt) 58 #else 59 #define DBG(fmt...) 60 #endif 61 62 63 static int __initdata dt_root_addr_cells; 64 static int __initdata dt_root_size_cells; 65 66 #ifdef CONFIG_PPC64 67 int __initdata iommu_is_off; 68 int __initdata iommu_force_on; 69 unsigned long tce_alloc_start, tce_alloc_end; 70 #endif 71 72 typedef u32 cell_t; 73 74 #if 0 75 static struct boot_param_header *initial_boot_params __initdata; 76 #else 77 struct boot_param_header *initial_boot_params; 78 #endif 79 80 static struct device_node *allnodes = NULL; 81 82 /* use when traversing tree through the allnext, child, sibling, 83 * or parent members of struct device_node. 84 */ 85 static DEFINE_RWLOCK(devtree_lock); 86 87 /* export that to outside world */ 88 struct device_node *of_chosen; 89 90 struct device_node *dflt_interrupt_controller; 91 int num_interrupt_controllers; 92 93 /* 94 * Wrapper for allocating memory for various data that needs to be 95 * attached to device nodes as they are processed at boot or when 96 * added to the device tree later (e.g. DLPAR). At boot there is 97 * already a region reserved so we just increment *mem_start by size; 98 * otherwise we call kmalloc. 99 */ 100 static void * prom_alloc(unsigned long size, unsigned long *mem_start) 101 { 102 unsigned long tmp; 103 104 if (!mem_start) 105 return kmalloc(size, GFP_KERNEL); 106 107 tmp = *mem_start; 108 *mem_start += size; 109 return (void *)tmp; 110 } 111 112 /* 113 * Find the device_node with a given phandle. 114 */ 115 static struct device_node * find_phandle(phandle ph) 116 { 117 struct device_node *np; 118 119 for (np = allnodes; np != 0; np = np->allnext) 120 if (np->linux_phandle == ph) 121 return np; 122 return NULL; 123 } 124 125 /* 126 * Find the interrupt parent of a node. 
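 * The node's "interrupt-parent" property, if present, is a phandle
 * referring to the controller node; otherwise we fall back to the
 * node's parent in the tree.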
127 */ 128 static struct device_node * __devinit intr_parent(struct device_node *p) 129 { 130 phandle *parp; 131 132 parp = (phandle *) get_property(p, "interrupt-parent", NULL); 133 if (parp == NULL) 134 return p->parent; 135 p = find_phandle(*parp); 136 if (p != NULL) 137 return p; 138 /* 139 * On a powermac booted with BootX, we don't get to know the 140 * phandles for any nodes, so find_phandle will return NULL. 141 * Fortunately these machines only have one interrupt controller 142 * so there isn't in fact any ambiguity. -- paulus 143 */ 144 if (num_interrupt_controllers == 1) 145 p = dflt_interrupt_controller; 146 return p; 147 } 148 149 /* 150 * Find out the size of each entry of the interrupts property 151 * for a node. 152 */ 153 int __devinit prom_n_intr_cells(struct device_node *np) 154 { 155 struct device_node *p; 156 unsigned int *icp; 157 158 for (p = np; (p = intr_parent(p)) != NULL; ) { 159 icp = (unsigned int *) 160 get_property(p, "#interrupt-cells", NULL); 161 if (icp != NULL) 162 return *icp; 163 if (get_property(p, "interrupt-controller", NULL) != NULL 164 || get_property(p, "interrupt-map", NULL) != NULL) { 165 printk("oops, node %s doesn't have #interrupt-cells\n", 166 p->full_name); 167 return 1; 168 } 169 } 170 #ifdef DEBUG_IRQ 171 printk("prom_n_intr_cells failed for %s\n", np->full_name); 172 #endif 173 return 1; 174 } 175 176 /* 177 * Map an interrupt from a device up to the platform interrupt 178 * descriptor. 179 */ 180 static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler, 181 struct device_node *np, unsigned int *ints, 182 int nintrc) 183 { 184 struct device_node *p, *ipar; 185 unsigned int *imap, *imask, *ip; 186 int i, imaplen, match; 187 int newintrc = 0, newaddrc = 0; 188 unsigned int *reg; 189 int naddrc; 190 191 reg = (unsigned int *) get_property(np, "reg", NULL); 192 naddrc = prom_n_addr_cells(np); 193 p = intr_parent(np); 194 while (p != NULL) { 195 if (get_property(p, "interrupt-controller", NULL) != NULL) 196 /* this node is an interrupt controller, stop here */ 197 break; 198 imap = (unsigned int *) 199 get_property(p, "interrupt-map", &imaplen); 200 if (imap == NULL) { 201 p = intr_parent(p); 202 continue; 203 } 204 imask = (unsigned int *) 205 get_property(p, "interrupt-map-mask", NULL); 206 if (imask == NULL) { 207 printk("oops, %s has interrupt-map but no mask\n", 208 p->full_name); 209 return 0; 210 } 211 imaplen /= sizeof(unsigned int); 212 match = 0; 213 ipar = NULL; 214 while (imaplen > 0 && !match) { 215 /* check the child-interrupt field */ 216 match = 1; 217 for (i = 0; i < naddrc && match; ++i) 218 match = ((reg[i] ^ imap[i]) & imask[i]) == 0; 219 for (; i < naddrc + nintrc && match; ++i) 220 match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0; 221 imap += naddrc + nintrc; 222 imaplen -= naddrc + nintrc; 223 /* grab the interrupt parent */ 224 ipar = find_phandle((phandle) *imap++); 225 --imaplen; 226 if (ipar == NULL && num_interrupt_controllers == 1) 227 /* cope with BootX not giving us phandles */ 228 ipar = dflt_interrupt_controller; 229 if (ipar == NULL) { 230 printk("oops, no int parent %x in map of %s\n", 231 imap[-1], p->full_name); 232 return 0; 233 } 234 /* find the parent's # addr and intr cells */ 235 ip = (unsigned int *) 236 get_property(ipar, "#interrupt-cells", NULL); 237 if (ip == NULL) { 238 printk("oops, no #interrupt-cells on %s\n", 239 ipar->full_name); 240 return 0; 241 } 242 newintrc = *ip; 243 ip = (unsigned int *) 244 get_property(ipar, "#address-cells", NULL); 245 newaddrc = (ip 
== NULL)? 0: *ip; 246 imap += newaddrc + newintrc; 247 imaplen -= newaddrc + newintrc; 248 } 249 if (imaplen < 0) { 250 printk("oops, error decoding int-map on %s, len=%d\n", 251 p->full_name, imaplen); 252 return 0; 253 } 254 if (!match) { 255 #ifdef DEBUG_IRQ 256 printk("oops, no match in %s int-map for %s\n", 257 p->full_name, np->full_name); 258 #endif 259 return 0; 260 } 261 p = ipar; 262 naddrc = newaddrc; 263 nintrc = newintrc; 264 ints = imap - nintrc; 265 reg = ints - naddrc; 266 } 267 if (p == NULL) { 268 #ifdef DEBUG_IRQ 269 printk("hmmm, int tree for %s doesn't have ctrler\n", 270 np->full_name); 271 #endif 272 return 0; 273 } 274 *irq = ints; 275 *ictrler = p; 276 return nintrc; 277 } 278 279 static unsigned char map_isa_senses[4] = { 280 IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE, 281 IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE, 282 IRQ_SENSE_EDGE | IRQ_POLARITY_NEGATIVE, 283 IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE 284 }; 285 286 static unsigned char map_mpic_senses[4] = { 287 IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE, 288 IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE, 289 /* 2 seems to be used for the 8259 cascade... */ 290 IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE, 291 IRQ_SENSE_EDGE | IRQ_POLARITY_NEGATIVE, 292 }; 293 294 static int __devinit finish_node_interrupts(struct device_node *np, 295 unsigned long *mem_start, 296 int measure_only) 297 { 298 unsigned int *ints; 299 int intlen, intrcells, intrcount; 300 int i, j, n, sense; 301 unsigned int *irq, virq; 302 struct device_node *ic; 303 int trace = 0; 304 305 //#define TRACE(fmt...) do { if (trace) { printk(fmt); mdelay(1000); } } while(0) 306 #define TRACE(fmt...) 307 308 if (!strcmp(np->name, "smu-doorbell")) 309 trace = 1; 310 311 TRACE("Finishing SMU doorbell ! num_interrupt_controllers = %d\n", 312 num_interrupt_controllers); 313 314 if (num_interrupt_controllers == 0) { 315 /* 316 * Old machines just have a list of interrupt numbers 317 * and no interrupt-controller nodes. 
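		 * In that case each cell of the property is taken as a bare
		 * interrupt line number, and the sense defaults to
		 * level-triggered, active-low.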
318 */ 319 ints = (unsigned int *) get_property(np, "AAPL,interrupts", 320 &intlen); 321 /* XXX old interpret_pci_props looked in parent too */ 322 /* XXX old interpret_macio_props looked for interrupts 323 before AAPL,interrupts */ 324 if (ints == NULL) 325 ints = (unsigned int *) get_property(np, "interrupts", 326 &intlen); 327 if (ints == NULL) 328 return 0; 329 330 np->n_intrs = intlen / sizeof(unsigned int); 331 np->intrs = prom_alloc(np->n_intrs * sizeof(np->intrs[0]), 332 mem_start); 333 if (!np->intrs) 334 return -ENOMEM; 335 if (measure_only) 336 return 0; 337 338 for (i = 0; i < np->n_intrs; ++i) { 339 np->intrs[i].line = *ints++; 340 np->intrs[i].sense = IRQ_SENSE_LEVEL 341 | IRQ_POLARITY_NEGATIVE; 342 } 343 return 0; 344 } 345 346 ints = (unsigned int *) get_property(np, "interrupts", &intlen); 347 TRACE("ints=%p, intlen=%d\n", ints, intlen); 348 if (ints == NULL) 349 return 0; 350 intrcells = prom_n_intr_cells(np); 351 intlen /= intrcells * sizeof(unsigned int); 352 TRACE("intrcells=%d, new intlen=%d\n", intrcells, intlen); 353 np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start); 354 if (!np->intrs) 355 return -ENOMEM; 356 357 if (measure_only) 358 return 0; 359 360 intrcount = 0; 361 for (i = 0; i < intlen; ++i, ints += intrcells) { 362 n = map_interrupt(&irq, &ic, np, ints, intrcells); 363 TRACE("map, irq=%d, ic=%p, n=%d\n", irq, ic, n); 364 if (n <= 0) 365 continue; 366 367 /* don't map IRQ numbers under a cascaded 8259 controller */ 368 if (ic && device_is_compatible(ic, "chrp,iic")) { 369 np->intrs[intrcount].line = irq[0]; 370 sense = (n > 1)? (irq[1] & 3): 3; 371 np->intrs[intrcount].sense = map_isa_senses[sense]; 372 } else { 373 virq = virt_irq_create_mapping(irq[0]); 374 TRACE("virq=%d\n", virq); 375 #ifdef CONFIG_PPC64 376 if (virq == NO_IRQ) { 377 printk(KERN_CRIT "Could not allocate interrupt" 378 " number for %s\n", np->full_name); 379 continue; 380 } 381 #endif 382 np->intrs[intrcount].line = irq_offset_up(virq); 383 sense = (n > 1)? 
(irq[1] & 3): 1;

			/* Apple uses bits in there in a different way, let's
			 * only keep the real sense bit on macs
			 */
			if (machine_is(powermac))
				sense &= 0x1;
			np->intrs[intrcount].sense = map_mpic_senses[sense];
		}

#ifdef CONFIG_PPC64
		/* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
		if (machine_is(powermac) && ic && ic->parent) {
			char *name = get_property(ic->parent, "name", NULL);
			if (name && !strcmp(name, "u3"))
				np->intrs[intrcount].line += 128;
			else if (!(name && (!strcmp(name, "mac-io") ||
					    !strcmp(name, "u4"))))
				/* ignore other cascaded controllers, such as
				   the k2-sata-root */
				break;
		}
#endif /* CONFIG_PPC64 */
		if (n > 2) {
			printk("hmmm, got %d intr cells for %s:", n,
			       np->full_name);
			for (j = 0; j < n; ++j)
				printk(" %d", irq[j]);
			printk("\n");
		}
		++intrcount;
	}
	np->n_intrs = intrcount;

	return 0;
}

static int __devinit finish_node(struct device_node *np,
				 unsigned long *mem_start,
				 int measure_only)
{
	struct device_node *child;
	int rc = 0;

	rc = finish_node_interrupts(np, mem_start, measure_only);
	if (rc)
		goto out;

	for (child = np->child; child != NULL; child = child->sibling) {
		rc = finish_node(child, mem_start, measure_only);
		if (rc)
			goto out;
	}
out:
	return rc;
}

static void __init scan_interrupt_controllers(void)
{
	struct device_node *np;
	int n = 0;
	char *name, *ic;
	int iclen;

	for (np = allnodes; np != NULL; np = np->allnext) {
		ic = get_property(np, "interrupt-controller", &iclen);
		name = get_property(np, "name", NULL);
		/* checking iclen makes sure we don't get a false
		   match on /chosen.interrupt_controller */
		if ((name != NULL
		     && strcmp(name, "interrupt-controller") == 0)
		    || (ic != NULL && iclen == 0
			&& strcmp(name, "AppleKiwi"))) {
			if (n == 0)
				dflt_interrupt_controller = np;
			++n;
		}
	}
	num_interrupt_controllers = n;
}

/**
 * finish_device_tree is called once things are running normally
 * (i.e. with text and data mapped to the address they were linked at).
 * It traverses the device tree and fills in some of the additional
 * fields in each node, such as {n_}addrs and {n_}intrs; the virtual
 * interrupt mapping is also initialized at this point.
 */
void __init finish_device_tree(void)
{
	unsigned long start, end, size = 0;

	DBG(" -> finish_device_tree\n");

#ifdef CONFIG_PPC64
	/* Initialize virtual IRQ map */
	virt_irq_init();
#endif
	scan_interrupt_controllers();

	/*
	 * Finish device-tree (pre-parsing some properties etc...)
	 * We do this in two passes: one with "measure_only" set, which
	 * only measures the amount of memory needed; then we allocate
	 * that memory and call finish_node() again.  However, we must
	 * be careful, as most routines will fail when prom_alloc()
	 * returns 0, so we make sure our first pass doesn't start at 0.
	 * We pre-initialize size to 16 for that reason and then
	 * subtract those additional 16 bytes.
	 */
	size = 16;
	finish_node(allnodes, &size, 1);
	size -= 16;

	if (0 == size)
		end = start = 0;
	else
		end = start = (unsigned long)__va(lmb_alloc(size, 128));

	finish_node(allnodes, &end, 0);
	BUG_ON(end != start + size);

	DBG(" <- finish_device_tree\n");
}

static inline char *find_flat_dt_string(u32 offset)
{
	return ((char *)initial_boot_params) +
		initial_boot_params->off_dt_strings + offset;
}

/**
 * This function is used to scan the flattened device-tree.  It is
 * used to extract memory information at boot, before we can
 * unflatten the tree.
 */
int __init of_scan_flat_dt(int (*it)(unsigned long node,
				     const char *uname, int depth,
				     void *data),
			   void *data)
{
	unsigned long p = ((unsigned long)initial_boot_params) +
		initial_boot_params->off_dt_struct;
	int rc = 0;
	int depth = -1;

	do {
		u32 tag = *((u32 *)p);
		char *pathp;

		p += 4;
		if (tag == OF_DT_END_NODE) {
			depth--;
			continue;
		}
		if (tag == OF_DT_NOP)
			continue;
		if (tag == OF_DT_END)
			break;
		if (tag == OF_DT_PROP) {
			u32 sz = *((u32 *)p);
			p += 8;
			if (initial_boot_params->version < 0x10)
				p = _ALIGN(p, sz >= 8 ? 8 : 4);
			p += sz;
			p = _ALIGN(p, 4);
			continue;
		}
		if (tag != OF_DT_BEGIN_NODE) {
			printk(KERN_WARNING "Invalid tag %x scanning flattened"
			       " device tree !\n", tag);
			return -EINVAL;
		}
		depth++;
		pathp = (char *)p;
		p = _ALIGN(p + strlen(pathp) + 1, 4);
		if ((*pathp) == '/') {
			char *lp, *np;
			for (lp = NULL, np = pathp; *np; np++)
				if ((*np) == '/')
					lp = np+1;
			if (lp != NULL)
				pathp = lp;
		}
		rc = it(p, pathp, depth, data);
		if (rc != 0)
			break;
	} while(1);

	return rc;
}

unsigned long __init of_get_flat_dt_root(void)
{
	unsigned long p = ((unsigned long)initial_boot_params) +
		initial_boot_params->off_dt_struct;

	while(*((u32 *)p) == OF_DT_NOP)
		p += 4;
	BUG_ON (*((u32 *)p) != OF_DT_BEGIN_NODE);
	p += 4;
	return _ALIGN(p + strlen((char *)p) + 1, 4);
}

/**
 * This function can be used within an of_scan_flat_dt() callback to
 * get access to properties.
 */
void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
				 unsigned long *size)
{
	unsigned long p = node;

	do {
		u32 tag = *((u32 *)p);
		u32 sz, noff;
		const char *nstr;

		p += 4;
		if (tag == OF_DT_NOP)
			continue;
		if (tag != OF_DT_PROP)
			return NULL;

		sz = *((u32 *)p);
		noff = *((u32 *)(p + 4));
		p += 8;
		if (initial_boot_params->version < 0x10)
			p = _ALIGN(p, sz >= 8 ? 8 : 4);

		nstr = find_flat_dt_string(noff);
		if (nstr == NULL) {
			printk(KERN_WARNING "Can't find property index"
			       " name !\n");
			return NULL;
		}
		if (strcmp(name, nstr) == 0) {
			if (size)
				*size = sz;
			return (void *)p;
		}
		p += sz;
		p = _ALIGN(p, 4);
	} while(1);
}

int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
{
	const char* cp;
	unsigned long cplen, l;

	cp = of_get_flat_dt_prop(node, "compatible", &cplen);
	if (cp == NULL)
		return 0;
	while (cplen > 0) {
		if (strncasecmp(cp, compat, strlen(compat)) == 0)
			return 1;
		l = strlen(cp) + 1;
		cp += l;
		cplen -= l;
	}

	return 0;
}

static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
				       unsigned long align)
{
	void *res;

	*mem = _ALIGN(*mem, align);
	res = (void *)*mem;
	*mem += size;

	return res;
}

static unsigned long __init unflatten_dt_node(unsigned long mem,
					      unsigned long *p,
					      struct device_node *dad,
					      struct device_node ***allnextpp,
					      unsigned long fpsize)
{
	struct device_node *np;
	struct property *pp, **prev_pp = NULL;
	char *pathp;
	u32 tag;
	unsigned int l, allocl;
	int has_name = 0;
	int new_format = 0;

	tag = *((u32 *)(*p));
	if (tag != OF_DT_BEGIN_NODE) {
		printk("Weird tag at start of node: %x\n", tag);
		return mem;
	}
	*p += 4;
	pathp = (char *)*p;
	l = allocl = strlen(pathp) + 1;
	*p = _ALIGN(*p + l, 4);

	/* Version 0x10 has a more compact unit name here instead of the
	 * full path.  We accumulate the full path size using "fpsize";
	 * we'll rebuild it later.  We detect this because the first
	 * character of the name is not '/'.
	 */
	if ((*pathp) != '/') {
		new_format = 1;
		if (fpsize == 0) {
			/* Root node: special case.  fpsize accounts for the
			 * path plus the terminating zero.  The root node only
			 * has '/', so fpsize should be 2, but we want to
			 * avoid first-level nodes getting two '/', so we use
			 * fpsize 1 here.
			 */
			fpsize = 1;
			allocl = 2;
		} else {
			/* account for '/' and path size minus terminating 0
			 * already in 'l'
			 */
			fpsize += l;
			allocl = fpsize;
		}
	}

	np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
				__alignof__(struct device_node));
	if (allnextpp) {
		memset(np, 0, sizeof(*np));
		np->full_name = ((char*)np) + sizeof(struct device_node);
		if (new_format) {
			char *p = np->full_name;
			/* rebuild full path for new format */
			if (dad && dad->parent) {
				strcpy(p, dad->full_name);
#ifdef DEBUG
				if ((strlen(p) + l + 1) != allocl) {
					DBG("%s: p: %d, l: %d, a: %d\n",
					    pathp, (int)strlen(p), l, allocl);
				}
#endif
				p += strlen(p);
			}
			*(p++) = '/';
			memcpy(p, pathp, l);
		} else
			memcpy(np->full_name, pathp, l);
		prev_pp = &np->properties;
		**allnextpp = np;
		*allnextpp = &np->allnext;
		if (dad != NULL) {
			np->parent = dad;
			/* we temporarily use the next field as `last_child' */
			if (dad->next == 0)
				dad->child = np;
			else
				dad->next->sibling = np;
			dad->next = np;
		}
		kref_init(&np->kref);
	}
	while(1) {
		u32 sz, noff;
		char *pname;

		tag = *((u32 *)(*p));
		if (tag == OF_DT_NOP) {
			*p += 4;
			continue;
		}
		if (tag != OF_DT_PROP)
			break;
		*p += 4;
		sz = *((u32 *)(*p));
		noff = *((u32 *)((*p) + 4));
		*p += 8;
		if (initial_boot_params->version < 0x10)
			*p = _ALIGN(*p, sz >= 8 ? 8 : 4);

		pname = find_flat_dt_string(noff);
		if (pname == NULL) {
			printk("Can't find property name in list !\n");
			break;
		}
		if (strcmp(pname, "name") == 0)
			has_name = 1;
		l = strlen(pname) + 1;
		pp = unflatten_dt_alloc(&mem, sizeof(struct property),
					__alignof__(struct property));
		if (allnextpp) {
			if (strcmp(pname, "linux,phandle") == 0) {
				np->node = *((u32 *)*p);
				if (np->linux_phandle == 0)
					np->linux_phandle = np->node;
			}
			if (strcmp(pname, "ibm,phandle") == 0)
				np->linux_phandle = *((u32 *)*p);
			pp->name = pname;
			pp->length = sz;
			pp->value = (void *)*p;
			*prev_pp = pp;
			prev_pp = &pp->next;
		}
		*p = _ALIGN((*p) + sz, 4);
	}
	/* With version 0x10 we may not have the name property; recreate
	 * it here from the unit name if absent.
	 */
	if (!has_name) {
		char *p = pathp, *ps = pathp, *pa = NULL;
		int sz;

		while (*p) {
			if ((*p) == '@')
				pa = p;
			if ((*p) == '/')
				ps = p + 1;
			p++;
		}
		if (pa < ps)
			pa = p;
		sz = (pa - ps) + 1;
		pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
					__alignof__(struct property));
		if (allnextpp) {
			pp->name = "name";
			pp->length = sz;
			pp->value = (unsigned char *)(pp + 1);
			*prev_pp = pp;
			prev_pp = &pp->next;
			memcpy(pp->value, ps, sz - 1);
			((char *)pp->value)[sz - 1] = 0;
			DBG("fixed up name for %s -> %s\n", pathp, pp->value);
		}
	}
	if (allnextpp) {
		*prev_pp = NULL;
		np->name = get_property(np, "name", NULL);
		np->type = get_property(np, "device_type", NULL);

		if (!np->name)
			np->name = "<NULL>";
		if (!np->type)
			np->type = "<NULL>";
	}
	while (tag == OF_DT_BEGIN_NODE) {
		mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
		tag = *((u32 *)(*p));
	}
	if (tag != OF_DT_END_NODE) {
		printk("Weird tag
at end of node: %x\n", tag); 835 return mem; 836 } 837 *p += 4; 838 return mem; 839 } 840 841 static int __init early_parse_mem(char *p) 842 { 843 if (!p) 844 return 1; 845 846 memory_limit = PAGE_ALIGN(memparse(p, &p)); 847 DBG("memory limit = 0x%lx\n", memory_limit); 848 849 return 0; 850 } 851 early_param("mem", early_parse_mem); 852 853 /* 854 * The device tree may be allocated below our memory limit, or inside the 855 * crash kernel region for kdump. If so, move it out now. 856 */ 857 static void move_device_tree(void) 858 { 859 unsigned long start, size; 860 void *p; 861 862 DBG("-> move_device_tree\n"); 863 864 start = __pa(initial_boot_params); 865 size = initial_boot_params->totalsize; 866 867 if ((memory_limit && (start + size) > memory_limit) || 868 overlaps_crashkernel(start, size)) { 869 p = __va(lmb_alloc_base(size, PAGE_SIZE, lmb.rmo_size)); 870 memcpy(p, initial_boot_params, size); 871 initial_boot_params = (struct boot_param_header *)p; 872 DBG("Moved device tree to 0x%p\n", p); 873 } 874 875 DBG("<- move_device_tree\n"); 876 } 877 878 /** 879 * unflattens the device-tree passed by the firmware, creating the 880 * tree of struct device_node. It also fills the "name" and "type" 881 * pointers of the nodes so the normal device-tree walking functions 882 * can be used (this used to be done by finish_device_tree) 883 */ 884 void __init unflatten_device_tree(void) 885 { 886 unsigned long start, mem, size; 887 struct device_node **allnextp = &allnodes; 888 889 DBG(" -> unflatten_device_tree()\n"); 890 891 /* First pass, scan for size */ 892 start = ((unsigned long)initial_boot_params) + 893 initial_boot_params->off_dt_struct; 894 size = unflatten_dt_node(0, &start, NULL, NULL, 0); 895 size = (size | 3) + 1; 896 897 DBG(" size is %lx, allocating...\n", size); 898 899 /* Allocate memory for the expanded device tree */ 900 mem = lmb_alloc(size + 4, __alignof__(struct device_node)); 901 mem = (unsigned long) __va(mem); 902 903 ((u32 *)mem)[size / 4] = 0xdeadbeef; 904 905 DBG(" unflattening %lx...\n", mem); 906 907 /* Second pass, do actual unflattening */ 908 start = ((unsigned long)initial_boot_params) + 909 initial_boot_params->off_dt_struct; 910 unflatten_dt_node(mem, &start, NULL, &allnextp, 0); 911 if (*((u32 *)start) != OF_DT_END) 912 printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start)); 913 if (((u32 *)mem)[size / 4] != 0xdeadbeef) 914 printk(KERN_WARNING "End of tree marker overwritten: %08x\n", 915 ((u32 *)mem)[size / 4] ); 916 *allnextp = NULL; 917 918 /* Get pointer to OF "/chosen" node for use everywhere */ 919 of_chosen = of_find_node_by_path("/chosen"); 920 if (of_chosen == NULL) 921 of_chosen = of_find_node_by_path("/chosen@0"); 922 923 DBG(" <- unflatten_device_tree()\n"); 924 } 925 926 /* 927 * ibm,pa-features is a per-cpu property that contains a string of 928 * attribute descriptors, each of which has a 2 byte header plus up 929 * to 254 bytes worth of processor attribute bits. First header 930 * byte specifies the number of bytes following the header. 931 * Second header byte is an "attribute-specifier" type, of which 932 * zero is the only currently-defined value. 933 * Implementation: Pass in the byte and bit offset for the feature 934 * that we are interested in. The function will return -1 if the 935 * pa-features property is missing, or a 1/0 to indicate if the feature 936 * is supported/not supported. Note that the bit numbers are 937 * big-endian to match the definition in PAPR. 
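 *
 * For example (a made-up value, not taken from real firmware), a
 * descriptor beginning with the bytes 0x05 0x00 says "5 attribute bytes
 * follow, attribute-specifier type 0"; the SLB bit tested below would
 * then be bit 2 (big-endian, i.e. the third most significant bit) of
 * the first attribute byte.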
938 */ 939 static struct ibm_pa_feature { 940 unsigned long cpu_features; /* CPU_FTR_xxx bit */ 941 unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */ 942 unsigned char pabyte; /* byte number in ibm,pa-features */ 943 unsigned char pabit; /* bit number (big-endian) */ 944 unsigned char invert; /* if 1, pa bit set => clear feature */ 945 } ibm_pa_features[] __initdata = { 946 {0, PPC_FEATURE_HAS_MMU, 0, 0, 0}, 947 {0, PPC_FEATURE_HAS_FPU, 0, 1, 0}, 948 {CPU_FTR_SLB, 0, 0, 2, 0}, 949 {CPU_FTR_CTRL, 0, 0, 3, 0}, 950 {CPU_FTR_NOEXECUTE, 0, 0, 6, 0}, 951 {CPU_FTR_NODSISRALIGN, 0, 1, 1, 1}, 952 #if 0 953 /* put this back once we know how to test if firmware does 64k IO */ 954 {CPU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0}, 955 #endif 956 {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0}, 957 }; 958 959 static void __init check_cpu_pa_features(unsigned long node) 960 { 961 unsigned char *pa_ftrs; 962 unsigned long len, tablelen, i, bit; 963 964 pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen); 965 if (pa_ftrs == NULL) 966 return; 967 968 /* find descriptor with type == 0 */ 969 for (;;) { 970 if (tablelen < 3) 971 return; 972 len = 2 + pa_ftrs[0]; 973 if (tablelen < len) 974 return; /* descriptor 0 not found */ 975 if (pa_ftrs[1] == 0) 976 break; 977 tablelen -= len; 978 pa_ftrs += len; 979 } 980 981 /* loop over bits we know about */ 982 for (i = 0; i < ARRAY_SIZE(ibm_pa_features); ++i) { 983 struct ibm_pa_feature *fp = &ibm_pa_features[i]; 984 985 if (fp->pabyte >= pa_ftrs[0]) 986 continue; 987 bit = (pa_ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1; 988 if (bit ^ fp->invert) { 989 cur_cpu_spec->cpu_features |= fp->cpu_features; 990 cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs; 991 } else { 992 cur_cpu_spec->cpu_features &= ~fp->cpu_features; 993 cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs; 994 } 995 } 996 } 997 998 static int __init early_init_dt_scan_cpus(unsigned long node, 999 const char *uname, int depth, 1000 void *data) 1001 { 1002 static int logical_cpuid = 0; 1003 char *type = of_get_flat_dt_prop(node, "device_type", NULL); 1004 #ifdef CONFIG_ALTIVEC 1005 u32 *prop; 1006 #endif 1007 u32 *intserv; 1008 int i, nthreads; 1009 unsigned long len; 1010 int found = 0; 1011 1012 /* We are scanning "cpu" nodes only */ 1013 if (type == NULL || strcmp(type, "cpu") != 0) 1014 return 0; 1015 1016 /* Get physical cpuid */ 1017 intserv = of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", &len); 1018 if (intserv) { 1019 nthreads = len / sizeof(int); 1020 } else { 1021 intserv = of_get_flat_dt_prop(node, "reg", NULL); 1022 nthreads = 1; 1023 } 1024 1025 /* 1026 * Now see if any of these threads match our boot cpu. 1027 * NOTE: This must match the parsing done in smp_setup_cpu_maps. 1028 */ 1029 for (i = 0; i < nthreads; i++) { 1030 /* 1031 * version 2 of the kexec param format adds the phys cpuid of 1032 * booted proc. 1033 */ 1034 if (initial_boot_params && initial_boot_params->version >= 2) { 1035 if (intserv[i] == 1036 initial_boot_params->boot_cpuid_phys) { 1037 found = 1; 1038 break; 1039 } 1040 } else { 1041 /* 1042 * Check if it's the boot-cpu, set it's hw index now, 1043 * unfortunately this format did not support booting 1044 * off secondary threads. 
1045 */ 1046 if (of_get_flat_dt_prop(node, 1047 "linux,boot-cpu", NULL) != NULL) { 1048 found = 1; 1049 break; 1050 } 1051 } 1052 1053 #ifdef CONFIG_SMP 1054 /* logical cpu id is always 0 on UP kernels */ 1055 logical_cpuid++; 1056 #endif 1057 } 1058 1059 if (found) { 1060 DBG("boot cpu: logical %d physical %d\n", logical_cpuid, 1061 intserv[i]); 1062 boot_cpuid = logical_cpuid; 1063 set_hard_smp_processor_id(boot_cpuid, intserv[i]); 1064 } 1065 1066 #ifdef CONFIG_ALTIVEC 1067 /* Check if we have a VMX and eventually update CPU features */ 1068 prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL); 1069 if (prop && (*prop) > 0) { 1070 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC; 1071 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC; 1072 } 1073 1074 /* Same goes for Apple's "altivec" property */ 1075 prop = (u32 *)of_get_flat_dt_prop(node, "altivec", NULL); 1076 if (prop) { 1077 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC; 1078 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC; 1079 } 1080 #endif /* CONFIG_ALTIVEC */ 1081 1082 check_cpu_pa_features(node); 1083 1084 #ifdef CONFIG_PPC_PSERIES 1085 if (nthreads > 1) 1086 cur_cpu_spec->cpu_features |= CPU_FTR_SMT; 1087 else 1088 cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT; 1089 #endif 1090 1091 return 0; 1092 } 1093 1094 static int __init early_init_dt_scan_chosen(unsigned long node, 1095 const char *uname, int depth, void *data) 1096 { 1097 unsigned long *lprop; 1098 unsigned long l; 1099 char *p; 1100 1101 DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname); 1102 1103 if (depth != 1 || 1104 (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0)) 1105 return 0; 1106 1107 #ifdef CONFIG_PPC64 1108 /* check if iommu is forced on or off */ 1109 if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL) 1110 iommu_is_off = 1; 1111 if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL) 1112 iommu_force_on = 1; 1113 #endif 1114 1115 /* mem=x on the command line is the preferred mechanism */ 1116 lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL); 1117 if (lprop) 1118 memory_limit = *lprop; 1119 1120 #ifdef CONFIG_PPC64 1121 lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL); 1122 if (lprop) 1123 tce_alloc_start = *lprop; 1124 lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL); 1125 if (lprop) 1126 tce_alloc_end = *lprop; 1127 #endif 1128 1129 #ifdef CONFIG_KEXEC 1130 lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL); 1131 if (lprop) 1132 crashk_res.start = *lprop; 1133 1134 lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL); 1135 if (lprop) 1136 crashk_res.end = crashk_res.start + *lprop - 1; 1137 #endif 1138 1139 /* Retreive command line */ 1140 p = of_get_flat_dt_prop(node, "bootargs", &l); 1141 if (p != NULL && l > 0) 1142 strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE)); 1143 1144 #ifdef CONFIG_CMDLINE 1145 if (l == 0 || (l == 1 && (*p) == 0)) 1146 strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); 1147 #endif /* CONFIG_CMDLINE */ 1148 1149 DBG("Command line is: %s\n", cmd_line); 1150 1151 /* break now */ 1152 return 1; 1153 } 1154 1155 static int __init early_init_dt_scan_root(unsigned long node, 1156 const char *uname, int depth, void *data) 1157 { 1158 u32 *prop; 1159 1160 if (depth != 0) 1161 return 0; 1162 1163 prop = of_get_flat_dt_prop(node, "#size-cells", NULL); 1164 dt_root_size_cells = (prop == NULL) ? 
1 : *prop;
	DBG("dt_root_size_cells = %x\n", dt_root_size_cells);

	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
	dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
	DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);

	/* break now */
	return 1;
}

static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
{
	cell_t *p = *cellp;
	unsigned long r;

	/* Ignore more than 2 cells */
	while (s > sizeof(unsigned long) / 4) {
		p++;
		s--;
	}
	r = *p++;
#ifdef CONFIG_PPC64
	if (s > 1) {
		r <<= 32;
		r |= *(p++);
		s--;
	}
#endif

	*cellp = p;
	return r;
}


static int __init early_init_dt_scan_memory(unsigned long node,
					    const char *uname, int depth,
					    void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	cell_t *reg, *endp;
	unsigned long l;

	/* We are scanning "memory" nodes only */
	if (type == NULL) {
		/*
		 * The longtrail doesn't have a device_type on the
		 * /memory node, so look for the node called /memory@0.
		 */
		if (depth != 1 || strcmp(uname, "memory@0") != 0)
			return 0;
	} else if (strcmp(type, "memory") != 0)
		return 0;

	reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l);
	if (reg == NULL)
		reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
	if (reg == NULL)
		return 0;

	endp = reg + (l / sizeof(cell_t));

	DBG("memory scan node %s, reg size %ld, data: %x %x %x %x,\n",
	    uname, l, reg[0], reg[1], reg[2], reg[3]);

	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
		unsigned long base, size;

		base = dt_mem_next_cell(dt_root_addr_cells, &reg);
		size = dt_mem_next_cell(dt_root_size_cells, &reg);

		if (size == 0)
			continue;
		DBG(" - %lx , %lx\n", base, size);
#ifdef CONFIG_PPC64
		if (iommu_is_off) {
			if (base >= 0x80000000ul)
				continue;
			if ((base + size) > 0x80000000ul)
				size = 0x80000000ul - base;
		}
#endif
		lmb_add(base, size);
	}
	return 0;
}

static void __init early_reserve_mem(void)
{
	u64 base, size;
	u64 *reserve_map;
	unsigned long self_base;
	unsigned long self_size;

	reserve_map = (u64 *)(((unsigned long)initial_boot_params) +
			      initial_boot_params->off_mem_rsvmap);

	/* before we do anything, let's reserve the dt blob */
	self_base = __pa((unsigned long)initial_boot_params);
	self_size = initial_boot_params->totalsize;
	lmb_reserve(self_base, self_size);

#ifdef CONFIG_PPC32
	/*
	 * Handle the case where we might be booting from an old kexec
	 * image that set up the mem_rsvmap as pairs of 32-bit values
	 */
	if (*reserve_map > 0xffffffffull) {
		u32 base_32, size_32;
		u32 *reserve_map_32 = (u32 *)reserve_map;

		while (1) {
			base_32 = *(reserve_map_32++);
			size_32 = *(reserve_map_32++);
			if (size_32 == 0)
				break;
			/* skip if the reservation is for the blob */
			if (base_32 == self_base && size_32 == self_size)
				continue;
			DBG("reserving: %x -> %x\n", base_32, size_32);
			lmb_reserve(base_32, size_32);
		}
		return;
	}
#endif
	while (1) {
		base = *(reserve_map++);
		size = *(reserve_map++);
		if (size == 0)
			break;
		/* skip if the reservation is for the blob */
		if (base == self_base && size == self_size)
			continue;
		DBG("reserving: %llx -> %llx\n", base, size);
		lmb_reserve(base, size);
	}

#if 0
	DBG("memory reserved, lmbs :\n");
	lmb_dump_all();
#endif
}

void __init early_init_devtree(void *params)
{
	DBG(" -> early_init_devtree()\n");

	/* Setup flat device-tree pointer */
	initial_boot_params = params;

#ifdef CONFIG_PPC_RTAS
	/* Some machines might need RTAS info for debugging, grab it now. */
	of_scan_flat_dt(early_init_dt_scan_rtas, NULL);
#endif

	/* Retrieve various information from the /chosen node of the
	 * device-tree, including the platform type, initrd location and
	 * size, TCE reserve, and more ...
	 */
	of_scan_flat_dt(early_init_dt_scan_chosen, NULL);

	/* Scan memory nodes and rebuild LMBs */
	lmb_init();
	of_scan_flat_dt(early_init_dt_scan_root, NULL);
	of_scan_flat_dt(early_init_dt_scan_memory, NULL);

	/* Save command line for /proc/cmdline and then parse parameters */
	strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
	parse_early_param();

	/* Reserve LMB regions used by kernel, initrd, dt, etc... */
	lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
	reserve_kdump_trampoline();
	reserve_crashkernel();
	early_reserve_mem();

	lmb_enforce_memory_limit(memory_limit);
	lmb_analyze();

	DBG("Phys. mem: %lx\n", lmb_phys_mem_size());

	/* We may need to relocate the flat tree, do it now.
	 * FIXME .. and the initrd too? */
	move_device_tree();

	DBG("Scanning CPUs ...\n");

	/* Retrieve CPU-related information from the flat tree
	 * (altivec support, boot CPU ID, ...)
	 */
	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);

	DBG(" <- early_init_devtree()\n");
}

#undef printk

int
prom_n_addr_cells(struct device_node* np)
{
	int* ip;
	do {
		if (np->parent)
			np = np->parent;
		ip = (int *) get_property(np, "#address-cells", NULL);
		if (ip != NULL)
			return *ip;
	} while (np->parent);
	/* No #address-cells property for the root node, default to 1 */
	return 1;
}
EXPORT_SYMBOL(prom_n_addr_cells);

int
prom_n_size_cells(struct device_node* np)
{
	int* ip;
	do {
		if (np->parent)
			np = np->parent;
		ip = (int *) get_property(np, "#size-cells", NULL);
		if (ip != NULL)
			return *ip;
	} while (np->parent);
	/* No #size-cells property for the root node, default to 1 */
	return 1;
}
EXPORT_SYMBOL(prom_n_size_cells);

/**
 * Work out the sense (active-low level / active-high edge)
 * of each interrupt from the device tree.
 */
void __init prom_get_irq_senses(unsigned char *senses, int off, int max)
{
	struct device_node *np;
	int i, j;

	/* default to level-triggered */
	memset(senses, IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE, max - off);

	for (np = allnodes; np != 0; np = np->allnext) {
		for (j = 0; j < np->n_intrs; j++) {
			i = np->intrs[j].line;
			if (i >= off && i < max)
				senses[i-off] = np->intrs[j].sense;
		}
	}
}

/**
 * Construct and return a list of the device_nodes with a given name.
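 * The nodes are linked together through their ->next field.  Unlike the
 * refcounted of_find_* interfaces further down in this file, this older
 * interface does not take references on the nodes it returns.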
1415 */ 1416 struct device_node *find_devices(const char *name) 1417 { 1418 struct device_node *head, **prevp, *np; 1419 1420 prevp = &head; 1421 for (np = allnodes; np != 0; np = np->allnext) { 1422 if (np->name != 0 && strcasecmp(np->name, name) == 0) { 1423 *prevp = np; 1424 prevp = &np->next; 1425 } 1426 } 1427 *prevp = NULL; 1428 return head; 1429 } 1430 EXPORT_SYMBOL(find_devices); 1431 1432 /** 1433 * Construct and return a list of the device_nodes with a given type. 1434 */ 1435 struct device_node *find_type_devices(const char *type) 1436 { 1437 struct device_node *head, **prevp, *np; 1438 1439 prevp = &head; 1440 for (np = allnodes; np != 0; np = np->allnext) { 1441 if (np->type != 0 && strcasecmp(np->type, type) == 0) { 1442 *prevp = np; 1443 prevp = &np->next; 1444 } 1445 } 1446 *prevp = NULL; 1447 return head; 1448 } 1449 EXPORT_SYMBOL(find_type_devices); 1450 1451 /** 1452 * Returns all nodes linked together 1453 */ 1454 struct device_node *find_all_nodes(void) 1455 { 1456 struct device_node *head, **prevp, *np; 1457 1458 prevp = &head; 1459 for (np = allnodes; np != 0; np = np->allnext) { 1460 *prevp = np; 1461 prevp = &np->next; 1462 } 1463 *prevp = NULL; 1464 return head; 1465 } 1466 EXPORT_SYMBOL(find_all_nodes); 1467 1468 /** Checks if the given "compat" string matches one of the strings in 1469 * the device's "compatible" property 1470 */ 1471 int device_is_compatible(struct device_node *device, const char *compat) 1472 { 1473 const char* cp; 1474 int cplen, l; 1475 1476 cp = (char *) get_property(device, "compatible", &cplen); 1477 if (cp == NULL) 1478 return 0; 1479 while (cplen > 0) { 1480 if (strncasecmp(cp, compat, strlen(compat)) == 0) 1481 return 1; 1482 l = strlen(cp) + 1; 1483 cp += l; 1484 cplen -= l; 1485 } 1486 1487 return 0; 1488 } 1489 EXPORT_SYMBOL(device_is_compatible); 1490 1491 1492 /** 1493 * Indicates whether the root node has a given value in its 1494 * compatible property. 1495 */ 1496 int machine_is_compatible(const char *compat) 1497 { 1498 struct device_node *root; 1499 int rc = 0; 1500 1501 root = of_find_node_by_path("/"); 1502 if (root) { 1503 rc = device_is_compatible(root, compat); 1504 of_node_put(root); 1505 } 1506 return rc; 1507 } 1508 EXPORT_SYMBOL(machine_is_compatible); 1509 1510 /** 1511 * Construct and return a list of the device_nodes with a given type 1512 * and compatible property. 1513 */ 1514 struct device_node *find_compatible_devices(const char *type, 1515 const char *compat) 1516 { 1517 struct device_node *head, **prevp, *np; 1518 1519 prevp = &head; 1520 for (np = allnodes; np != 0; np = np->allnext) { 1521 if (type != NULL 1522 && !(np->type != 0 && strcasecmp(np->type, type) == 0)) 1523 continue; 1524 if (device_is_compatible(np, compat)) { 1525 *prevp = np; 1526 prevp = &np->next; 1527 } 1528 } 1529 *prevp = NULL; 1530 return head; 1531 } 1532 EXPORT_SYMBOL(find_compatible_devices); 1533 1534 /** 1535 * Find the device_node with a given full_name. 1536 */ 1537 struct device_node *find_path_device(const char *path) 1538 { 1539 struct device_node *np; 1540 1541 for (np = allnodes; np != 0; np = np->allnext) 1542 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0) 1543 return np; 1544 return NULL; 1545 } 1546 EXPORT_SYMBOL(find_path_device); 1547 1548 /******* 1549 * 1550 * New implementation of the OF "find" APIs, return a refcounted 1551 * object, call of_node_put() when done. The device tree and list 1552 * are protected by a rw_lock. 
1553 * 1554 * Note that property management will need some locking as well, 1555 * this isn't dealt with yet. 1556 * 1557 *******/ 1558 1559 /** 1560 * of_find_node_by_name - Find a node by its "name" property 1561 * @from: The node to start searching from or NULL, the node 1562 * you pass will not be searched, only the next one 1563 * will; typically, you pass what the previous call 1564 * returned. of_node_put() will be called on it 1565 * @name: The name string to match against 1566 * 1567 * Returns a node pointer with refcount incremented, use 1568 * of_node_put() on it when done. 1569 */ 1570 struct device_node *of_find_node_by_name(struct device_node *from, 1571 const char *name) 1572 { 1573 struct device_node *np; 1574 1575 read_lock(&devtree_lock); 1576 np = from ? from->allnext : allnodes; 1577 for (; np != NULL; np = np->allnext) 1578 if (np->name != NULL && strcasecmp(np->name, name) == 0 1579 && of_node_get(np)) 1580 break; 1581 if (from) 1582 of_node_put(from); 1583 read_unlock(&devtree_lock); 1584 return np; 1585 } 1586 EXPORT_SYMBOL(of_find_node_by_name); 1587 1588 /** 1589 * of_find_node_by_type - Find a node by its "device_type" property 1590 * @from: The node to start searching from or NULL, the node 1591 * you pass will not be searched, only the next one 1592 * will; typically, you pass what the previous call 1593 * returned. of_node_put() will be called on it 1594 * @name: The type string to match against 1595 * 1596 * Returns a node pointer with refcount incremented, use 1597 * of_node_put() on it when done. 1598 */ 1599 struct device_node *of_find_node_by_type(struct device_node *from, 1600 const char *type) 1601 { 1602 struct device_node *np; 1603 1604 read_lock(&devtree_lock); 1605 np = from ? from->allnext : allnodes; 1606 for (; np != 0; np = np->allnext) 1607 if (np->type != 0 && strcasecmp(np->type, type) == 0 1608 && of_node_get(np)) 1609 break; 1610 if (from) 1611 of_node_put(from); 1612 read_unlock(&devtree_lock); 1613 return np; 1614 } 1615 EXPORT_SYMBOL(of_find_node_by_type); 1616 1617 /** 1618 * of_find_compatible_node - Find a node based on type and one of the 1619 * tokens in its "compatible" property 1620 * @from: The node to start searching from or NULL, the node 1621 * you pass will not be searched, only the next one 1622 * will; typically, you pass what the previous call 1623 * returned. of_node_put() will be called on it 1624 * @type: The type string to match "device_type" or NULL to ignore 1625 * @compatible: The string to match to one of the tokens in the device 1626 * "compatible" list. 1627 * 1628 * Returns a node pointer with refcount incremented, use 1629 * of_node_put() on it when done. 1630 */ 1631 struct device_node *of_find_compatible_node(struct device_node *from, 1632 const char *type, const char *compatible) 1633 { 1634 struct device_node *np; 1635 1636 read_lock(&devtree_lock); 1637 np = from ? from->allnext : allnodes; 1638 for (; np != 0; np = np->allnext) { 1639 if (type != NULL 1640 && !(np->type != 0 && strcasecmp(np->type, type) == 0)) 1641 continue; 1642 if (device_is_compatible(np, compatible) && of_node_get(np)) 1643 break; 1644 } 1645 if (from) 1646 of_node_put(from); 1647 read_unlock(&devtree_lock); 1648 return np; 1649 } 1650 EXPORT_SYMBOL(of_find_compatible_node); 1651 1652 /** 1653 * of_find_node_by_path - Find a node matching a full OF path 1654 * @path: The full path to match 1655 * 1656 * Returns a node pointer with refcount incremented, use 1657 * of_node_put() on it when done. 
1658 */ 1659 struct device_node *of_find_node_by_path(const char *path) 1660 { 1661 struct device_node *np = allnodes; 1662 1663 read_lock(&devtree_lock); 1664 for (; np != 0; np = np->allnext) { 1665 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0 1666 && of_node_get(np)) 1667 break; 1668 } 1669 read_unlock(&devtree_lock); 1670 return np; 1671 } 1672 EXPORT_SYMBOL(of_find_node_by_path); 1673 1674 /** 1675 * of_find_node_by_phandle - Find a node given a phandle 1676 * @handle: phandle of the node to find 1677 * 1678 * Returns a node pointer with refcount incremented, use 1679 * of_node_put() on it when done. 1680 */ 1681 struct device_node *of_find_node_by_phandle(phandle handle) 1682 { 1683 struct device_node *np; 1684 1685 read_lock(&devtree_lock); 1686 for (np = allnodes; np != 0; np = np->allnext) 1687 if (np->linux_phandle == handle) 1688 break; 1689 if (np) 1690 of_node_get(np); 1691 read_unlock(&devtree_lock); 1692 return np; 1693 } 1694 EXPORT_SYMBOL(of_find_node_by_phandle); 1695 1696 /** 1697 * of_find_all_nodes - Get next node in global list 1698 * @prev: Previous node or NULL to start iteration 1699 * of_node_put() will be called on it 1700 * 1701 * Returns a node pointer with refcount incremented, use 1702 * of_node_put() on it when done. 1703 */ 1704 struct device_node *of_find_all_nodes(struct device_node *prev) 1705 { 1706 struct device_node *np; 1707 1708 read_lock(&devtree_lock); 1709 np = prev ? prev->allnext : allnodes; 1710 for (; np != 0; np = np->allnext) 1711 if (of_node_get(np)) 1712 break; 1713 if (prev) 1714 of_node_put(prev); 1715 read_unlock(&devtree_lock); 1716 return np; 1717 } 1718 EXPORT_SYMBOL(of_find_all_nodes); 1719 1720 /** 1721 * of_get_parent - Get a node's parent if any 1722 * @node: Node to get parent 1723 * 1724 * Returns a node pointer with refcount incremented, use 1725 * of_node_put() on it when done. 1726 */ 1727 struct device_node *of_get_parent(const struct device_node *node) 1728 { 1729 struct device_node *np; 1730 1731 if (!node) 1732 return NULL; 1733 1734 read_lock(&devtree_lock); 1735 np = of_node_get(node->parent); 1736 read_unlock(&devtree_lock); 1737 return np; 1738 } 1739 EXPORT_SYMBOL(of_get_parent); 1740 1741 /** 1742 * of_get_next_child - Iterate a node childs 1743 * @node: parent node 1744 * @prev: previous child of the parent node, or NULL to get first 1745 * 1746 * Returns a node pointer with refcount incremented, use 1747 * of_node_put() on it when done. 1748 */ 1749 struct device_node *of_get_next_child(const struct device_node *node, 1750 struct device_node *prev) 1751 { 1752 struct device_node *next; 1753 1754 read_lock(&devtree_lock); 1755 next = prev ? prev->sibling : node->child; 1756 for (; next != 0; next = next->sibling) 1757 if (of_node_get(next)) 1758 break; 1759 if (prev) 1760 of_node_put(prev); 1761 read_unlock(&devtree_lock); 1762 return next; 1763 } 1764 EXPORT_SYMBOL(of_get_next_child); 1765 1766 /** 1767 * of_node_get - Increment refcount of a node 1768 * @node: Node to inc refcount, NULL is supported to 1769 * simplify writing of callers 1770 * 1771 * Returns node. 
1772 */ 1773 struct device_node *of_node_get(struct device_node *node) 1774 { 1775 if (node) 1776 kref_get(&node->kref); 1777 return node; 1778 } 1779 EXPORT_SYMBOL(of_node_get); 1780 1781 static inline struct device_node * kref_to_device_node(struct kref *kref) 1782 { 1783 return container_of(kref, struct device_node, kref); 1784 } 1785 1786 /** 1787 * of_node_release - release a dynamically allocated node 1788 * @kref: kref element of the node to be released 1789 * 1790 * In of_node_put() this function is passed to kref_put() 1791 * as the destructor. 1792 */ 1793 static void of_node_release(struct kref *kref) 1794 { 1795 struct device_node *node = kref_to_device_node(kref); 1796 struct property *prop = node->properties; 1797 1798 if (!OF_IS_DYNAMIC(node)) 1799 return; 1800 while (prop) { 1801 struct property *next = prop->next; 1802 kfree(prop->name); 1803 kfree(prop->value); 1804 kfree(prop); 1805 prop = next; 1806 1807 if (!prop) { 1808 prop = node->deadprops; 1809 node->deadprops = NULL; 1810 } 1811 } 1812 kfree(node->intrs); 1813 kfree(node->full_name); 1814 kfree(node->data); 1815 kfree(node); 1816 } 1817 1818 /** 1819 * of_node_put - Decrement refcount of a node 1820 * @node: Node to dec refcount, NULL is supported to 1821 * simplify writing of callers 1822 * 1823 */ 1824 void of_node_put(struct device_node *node) 1825 { 1826 if (node) 1827 kref_put(&node->kref, of_node_release); 1828 } 1829 EXPORT_SYMBOL(of_node_put); 1830 1831 /* 1832 * Plug a device node into the tree and global list. 1833 */ 1834 void of_attach_node(struct device_node *np) 1835 { 1836 write_lock(&devtree_lock); 1837 np->sibling = np->parent->child; 1838 np->allnext = allnodes; 1839 np->parent->child = np; 1840 allnodes = np; 1841 write_unlock(&devtree_lock); 1842 } 1843 1844 /* 1845 * "Unplug" a node from the device tree. The caller must hold 1846 * a reference to the node. The memory associated with the node 1847 * is not freed until its refcount goes to zero. 1848 */ 1849 void of_detach_node(const struct device_node *np) 1850 { 1851 struct device_node *parent; 1852 1853 write_lock(&devtree_lock); 1854 1855 parent = np->parent; 1856 1857 if (allnodes == np) 1858 allnodes = np->allnext; 1859 else { 1860 struct device_node *prev; 1861 for (prev = allnodes; 1862 prev->allnext != np; 1863 prev = prev->allnext) 1864 ; 1865 prev->allnext = np->allnext; 1866 } 1867 1868 if (parent->child == np) 1869 parent->child = np->sibling; 1870 else { 1871 struct device_node *prevsib; 1872 for (prevsib = np->parent->child; 1873 prevsib->sibling != np; 1874 prevsib = prevsib->sibling) 1875 ; 1876 prevsib->sibling = np->sibling; 1877 } 1878 1879 write_unlock(&devtree_lock); 1880 } 1881 1882 #ifdef CONFIG_PPC_PSERIES 1883 /* 1884 * Fix up the uninitialized fields in a new device node: 1885 * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields 1886 * 1887 * A lot of boot-time code is duplicated here, because functions such 1888 * as finish_node_interrupts, interpret_pci_props, etc. cannot use the 1889 * slab allocator. 1890 * 1891 * This should probably be split up into smaller chunks. 
1892 */ 1893 1894 static int of_finish_dynamic_node(struct device_node *node) 1895 { 1896 struct device_node *parent = of_get_parent(node); 1897 int err = 0; 1898 phandle *ibm_phandle; 1899 1900 node->name = get_property(node, "name", NULL); 1901 node->type = get_property(node, "device_type", NULL); 1902 1903 if (!parent) { 1904 err = -ENODEV; 1905 goto out; 1906 } 1907 1908 /* We don't support that function on PowerMac, at least 1909 * not yet 1910 */ 1911 if (machine_is(powermac)) 1912 return -ENODEV; 1913 1914 /* fix up new node's linux_phandle field */ 1915 if ((ibm_phandle = (unsigned int *)get_property(node, 1916 "ibm,phandle", NULL))) 1917 node->linux_phandle = *ibm_phandle; 1918 1919 out: 1920 of_node_put(parent); 1921 return err; 1922 } 1923 1924 static int prom_reconfig_notifier(struct notifier_block *nb, 1925 unsigned long action, void *node) 1926 { 1927 int err; 1928 1929 switch (action) { 1930 case PSERIES_RECONFIG_ADD: 1931 err = of_finish_dynamic_node(node); 1932 if (!err) 1933 finish_node(node, NULL, 0); 1934 if (err < 0) { 1935 printk(KERN_ERR "finish_node returned %d\n", err); 1936 err = NOTIFY_BAD; 1937 } 1938 break; 1939 default: 1940 err = NOTIFY_DONE; 1941 break; 1942 } 1943 return err; 1944 } 1945 1946 static struct notifier_block prom_reconfig_nb = { 1947 .notifier_call = prom_reconfig_notifier, 1948 .priority = 10, /* This one needs to run first */ 1949 }; 1950 1951 static int __init prom_reconfig_setup(void) 1952 { 1953 return pSeries_reconfig_notifier_register(&prom_reconfig_nb); 1954 } 1955 __initcall(prom_reconfig_setup); 1956 #endif 1957 1958 struct property *of_find_property(struct device_node *np, const char *name, 1959 int *lenp) 1960 { 1961 struct property *pp; 1962 1963 read_lock(&devtree_lock); 1964 for (pp = np->properties; pp != 0; pp = pp->next) 1965 if (strcmp(pp->name, name) == 0) { 1966 if (lenp != 0) 1967 *lenp = pp->length; 1968 break; 1969 } 1970 read_unlock(&devtree_lock); 1971 1972 return pp; 1973 } 1974 1975 /* 1976 * Find a property with a given name for a given node 1977 * and return the value. 1978 */ 1979 unsigned char *get_property(struct device_node *np, const char *name, 1980 int *lenp) 1981 { 1982 struct property *pp = of_find_property(np,name,lenp); 1983 return pp ? pp->value : NULL; 1984 } 1985 EXPORT_SYMBOL(get_property); 1986 1987 /* 1988 * Add a property to a node 1989 */ 1990 int prom_add_property(struct device_node* np, struct property* prop) 1991 { 1992 struct property **next; 1993 1994 prop->next = NULL; 1995 write_lock(&devtree_lock); 1996 next = &np->properties; 1997 while (*next) { 1998 if (strcmp(prop->name, (*next)->name) == 0) { 1999 /* duplicate ! don't insert it */ 2000 write_unlock(&devtree_lock); 2001 return -1; 2002 } 2003 next = &(*next)->next; 2004 } 2005 *next = prop; 2006 write_unlock(&devtree_lock); 2007 2008 #ifdef CONFIG_PROC_DEVICETREE 2009 /* try to add to proc as well if it was initialized */ 2010 if (np->pde) 2011 proc_device_tree_add_prop(np->pde, prop); 2012 #endif /* CONFIG_PROC_DEVICETREE */ 2013 2014 return 0; 2015 } 2016 2017 /* 2018 * Remove a property from a node. Note that we don't actually 2019 * remove it, since we have given out who-knows-how-many pointers 2020 * to the data using get-property. Instead we just move the property 2021 * to the "dead properties" list, so it won't be found any more. 
2022 */ 2023 int prom_remove_property(struct device_node *np, struct property *prop) 2024 { 2025 struct property **next; 2026 int found = 0; 2027 2028 write_lock(&devtree_lock); 2029 next = &np->properties; 2030 while (*next) { 2031 if (*next == prop) { 2032 /* found the node */ 2033 *next = prop->next; 2034 prop->next = np->deadprops; 2035 np->deadprops = prop; 2036 found = 1; 2037 break; 2038 } 2039 next = &(*next)->next; 2040 } 2041 write_unlock(&devtree_lock); 2042 2043 if (!found) 2044 return -ENODEV; 2045 2046 #ifdef CONFIG_PROC_DEVICETREE 2047 /* try to remove the proc node as well */ 2048 if (np->pde) 2049 proc_device_tree_remove_prop(np->pde, prop); 2050 #endif /* CONFIG_PROC_DEVICETREE */ 2051 2052 return 0; 2053 } 2054 2055 /* 2056 * Update a property in a node. Note that we don't actually 2057 * remove it, since we have given out who-knows-how-many pointers 2058 * to the data using get-property. Instead we just move the property 2059 * to the "dead properties" list, and add the new property to the 2060 * property list 2061 */ 2062 int prom_update_property(struct device_node *np, 2063 struct property *newprop, 2064 struct property *oldprop) 2065 { 2066 struct property **next; 2067 int found = 0; 2068 2069 write_lock(&devtree_lock); 2070 next = &np->properties; 2071 while (*next) { 2072 if (*next == oldprop) { 2073 /* found the node */ 2074 newprop->next = oldprop->next; 2075 *next = newprop; 2076 oldprop->next = np->deadprops; 2077 np->deadprops = oldprop; 2078 found = 1; 2079 break; 2080 } 2081 next = &(*next)->next; 2082 } 2083 write_unlock(&devtree_lock); 2084 2085 if (!found) 2086 return -ENODEV; 2087 2088 #ifdef CONFIG_PROC_DEVICETREE 2089 /* try to add to proc as well if it was initialized */ 2090 if (np->pde) 2091 proc_device_tree_update_prop(np->pde, newprop, oldprop); 2092 #endif /* CONFIG_PROC_DEVICETREE */ 2093 2094 return 0; 2095 } 2096 2097 2098 /* Find the device node for a given logical cpu number, also returns the cpu 2099 * local thread number (index in ibm,interrupt-server#s) if relevant and 2100 * asked for (non NULL) 2101 */ 2102 struct device_node *of_get_cpu_node(int cpu, unsigned int *thread) 2103 { 2104 int hardid; 2105 struct device_node *np; 2106 2107 hardid = get_hard_smp_processor_id(cpu); 2108 2109 for_each_node_by_type(np, "cpu") { 2110 u32 *intserv; 2111 unsigned int plen, t; 2112 2113 /* Check for ibm,ppc-interrupt-server#s. If it doesn't exist 2114 * fallback to "reg" property and assume no threads 2115 */ 2116 intserv = (u32 *)get_property(np, "ibm,ppc-interrupt-server#s", 2117 &plen); 2118 if (intserv == NULL) { 2119 u32 *reg = (u32 *)get_property(np, "reg", NULL); 2120 if (reg == NULL) 2121 continue; 2122 if (*reg == hardid) { 2123 if (thread) 2124 *thread = 0; 2125 return np; 2126 } 2127 } else { 2128 plen /= sizeof(u32); 2129 for (t = 0; t < plen; t++) { 2130 if (hardid == intserv[t]) { 2131 if (thread) 2132 *thread = t; 2133 return np; 2134 } 2135 } 2136 } 2137 } 2138 return NULL; 2139 } 2140 2141 #ifdef DEBUG 2142 static struct debugfs_blob_wrapper flat_dt_blob; 2143 2144 static int __init export_flat_device_tree(void) 2145 { 2146 struct dentry *d; 2147 2148 d = debugfs_create_dir("powerpc", NULL); 2149 if (!d) 2150 return 1; 2151 2152 flat_dt_blob.data = initial_boot_params; 2153 flat_dt_blob.size = initial_boot_params->totalsize; 2154 2155 d = debugfs_create_blob("flat-device-tree", S_IFREG | S_IRUSR, 2156 d, &flat_dt_blob); 2157 if (!d) 2158 return 1; 2159 2160 return 0; 2161 } 2162 __initcall(export_flat_device_tree); 2163 #endif 2164
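
/*
 * Illustrative sketch only, guarded out like the other #if 0 blocks in
 * this file and not built: a typical consumer of the APIs above walks
 * nodes with the refcounted of_find_* calls and reads properties with
 * get_property().  The function name "example_dump_cpu_nodes" is made
 * up for illustration.
 */
#if 0
static void example_dump_cpu_nodes(void)
{
	struct device_node *np = NULL;
	u32 *reg;
	int len;

	/* of_find_node_by_type() drops the reference on the node passed
	 * in and returns the next match with its refcount raised, so a
	 * loop like this needs no explicit of_node_put() unless it
	 * breaks out early. */
	while ((np = of_find_node_by_type(np, "cpu")) != NULL) {
		reg = (u32 *)get_property(np, "reg", &len);
		if (reg != NULL && len >= sizeof(u32))
			printk("cpu node %s: reg = %u\n",
			       np->full_name, *reg);
	}
}
#endif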