// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt)	"OF: " fmt

#include <linux/device.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Max address size we deal with */
#define OF_MAX_ADDR_CELLS	4
#define OF_CHECK_ADDR_COUNT(na)	((na) > 0 && (na) <= OF_MAX_ADDR_CELLS)
#define OF_CHECK_COUNTS(na, ns)	(OF_CHECK_ADDR_COUNT(na) && (ns) > 0)

static struct of_bus *of_match_bus(struct device_node *np);
static int __of_address_to_resource(struct device_node *dev,
		const __be32 *addrp, u64 size, unsigned int flags,
		const char *name, struct resource *r);

/* Debug utility */
#ifdef DEBUG
static void of_dump_addr(const char *s, const __be32 *addr, int na)
{
	pr_debug("%s", s);
	while (na--)
		pr_cont(" %08x", be32_to_cpu(*(addr++)));
	pr_cont("\n");
}
#else
static void of_dump_addr(const char *s, const __be32 *addr, int na) { }
#endif

/* Callbacks for bus specific translators */
struct of_bus {
	const char	*name;
	const char	*addresses;
	int		(*match)(struct device_node *parent);
	void		(*count_cells)(struct device_node *child,
				       int *addrc, int *sizec);
	u64		(*map)(__be32 *addr, const __be32 *range,
			       int na, int ns, int pna);
	int		(*translate)(__be32 *addr, u64 offset, int na);
	unsigned int	(*get_flags)(const __be32 *addr);
};

/*
 * Default translator (generic bus)
 */

static void of_bus_default_count_cells(struct device_node *dev,
				       int *addrc, int *sizec)
{
	if (addrc)
		*addrc = of_n_addr_cells(dev);
	if (sizec)
		*sizec = of_n_size_cells(dev);
}

static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
			      int na, int ns, int pna)
{
	u64 cp, s, da;

	cp = of_read_number(range, na);
	s  = of_read_number(range + na + pna, ns);
	da = of_read_number(addr, na);

	pr_debug("default map, cp=%llx, s=%llx, da=%llx\n",
		 (unsigned long long)cp, (unsigned long long)s,
		 (unsigned long long)da);

	if (da < cp || da >= (cp + s))
		return OF_BAD_ADDR;
	return da - cp;
}

static int of_bus_default_translate(__be32 *addr, u64 offset, int na)
{
	u64 a = of_read_number(addr, na);

	memset(addr, 0, na * 4);
	a += offset;
	if (na > 1)
		addr[na - 2] = cpu_to_be32(a >> 32);
	addr[na - 1] = cpu_to_be32(a & 0xffffffffu);

	return 0;
}

static unsigned int of_bus_default_get_flags(const __be32 *addr)
{
	return IORESOURCE_MEM;
}

#ifdef CONFIG_PCI
/*
 * PCI bus specific translator
 */

static int of_bus_pci_match(struct device_node *np)
{
	/*
	 * "pciex" is PCI Express
	 * "vci" is for the /chaos bridge on 1st-gen PCI powermacs
	 * "ht" is hypertransport
	 */
	return !strcmp(np->type, "pci") || !strcmp(np->type, "pciex") ||
	       !strcmp(np->type, "vci") || !strcmp(np->type, "ht");
}

static void of_bus_pci_count_cells(struct device_node *np,
				   int *addrc, int *sizec)
{
	if (addrc)
		*addrc = 3;
	if (sizec)
		*sizec = 2;
}

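/*
 * In the device tree, a PCI address takes three cells.  The first (high)
 * cell packs the address space flags together with the config-space
 * location, roughly:
 *
 *	npt000ss bbbbbbbb dddddfff rrrrrrrr
 *
 * where "ss" selects the space (01: I/O, 10: 32-bit memory, 11: 64-bit
 * memory), "p" marks a prefetchable range, "n" a non-relocatable one, and
 * b/d/f/r give the bus, device, function and register.  The remaining two
 * cells hold the 64-bit offset within that space.  The helpers below only
 * look at the bits they need.
 */
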
static unsigned int of_bus_pci_get_flags(const __be32 *addr)
{
	unsigned int flags = 0;
	u32 w = be32_to_cpup(addr);

	switch ((w >> 24) & 0x03) {
	case 0x01:
		flags |= IORESOURCE_IO;
		break;
	case 0x02: /* 32 bits */
	case 0x03: /* 64 bits */
		flags |= IORESOURCE_MEM;
		break;
	}
	if (w & 0x40000000)
		flags |= IORESOURCE_PREFETCH;
	return flags;
}

static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,
			  int pna)
{
	u64 cp, s, da;
	unsigned int af, rf;

	af = of_bus_pci_get_flags(addr);
	rf = of_bus_pci_get_flags(range);

	/* Check address type match */
	if ((af ^ rf) & (IORESOURCE_MEM | IORESOURCE_IO))
		return OF_BAD_ADDR;

	/* Read address values, skipping high cell */
	cp = of_read_number(range + 1, na - 1);
	s  = of_read_number(range + na + pna, ns);
	da = of_read_number(addr + 1, na - 1);

	pr_debug("PCI map, cp=%llx, s=%llx, da=%llx\n",
		 (unsigned long long)cp, (unsigned long long)s,
		 (unsigned long long)da);

	if (da < cp || da >= (cp + s))
		return OF_BAD_ADDR;
	return da - cp;
}

static int of_bus_pci_translate(__be32 *addr, u64 offset, int na)
{
	return of_bus_default_translate(addr + 1, offset, na - 1);
}

const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
				 unsigned int *flags)
{
	const __be32 *prop;
	unsigned int psize;
	struct device_node *parent;
	struct of_bus *bus;
	int onesize, i, na, ns;

	/* Get parent & match bus type */
	parent = of_get_parent(dev);
	if (parent == NULL)
		return NULL;
	bus = of_match_bus(parent);
	if (strcmp(bus->name, "pci")) {
		of_node_put(parent);
		return NULL;
	}
	bus->count_cells(dev, &na, &ns);
	of_node_put(parent);
	if (!OF_CHECK_ADDR_COUNT(na))
		return NULL;

	/* Get "reg" or "assigned-addresses" property */
	prop = of_get_property(dev, bus->addresses, &psize);
	if (prop == NULL)
		return NULL;
	psize /= 4;

	onesize = na + ns;
	for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++) {
		u32 val = be32_to_cpu(prop[0]);

		if ((val & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) {
			if (size)
				*size = of_read_number(prop + na, ns);
			if (flags)
				*flags = bus->get_flags(prop);
			return prop;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(of_get_pci_address);

int of_pci_address_to_resource(struct device_node *dev, int bar,
			       struct resource *r)
{
	const __be32 *addrp;
	u64 size;
	unsigned int flags;

	addrp = of_get_pci_address(dev, bar, &size, &flags);
	if (addrp == NULL)
		return -EINVAL;
	return __of_address_to_resource(dev, addrp, size, flags, NULL, r);
}
EXPORT_SYMBOL_GPL(of_pci_address_to_resource);

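/*
 * Typical use (sketch, error handling elided): translate one BAR of a PCI
 * device node "np" into a struct resource.
 *
 *	struct resource res;
 *
 *	if (of_pci_address_to_resource(np, 0, &res))
 *		return -ENODEV;
 *	pr_debug("BAR0 at %pR\n", &res);
 */
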
static int parser_init(struct of_pci_range_parser *parser,
		       struct device_node *node, const char *name)
{
	const int na = 3, ns = 2;
	int rlen;

	parser->node = node;
	parser->pna = of_n_addr_cells(node);
	parser->np = parser->pna + na + ns;

	parser->range = of_get_property(node, name, &rlen);
	if (parser->range == NULL)
		return -ENOENT;

	parser->end = parser->range + rlen / sizeof(__be32);

	return 0;
}

int of_pci_range_parser_init(struct of_pci_range_parser *parser,
			     struct device_node *node)
{
	return parser_init(parser, node, "ranges");
}
EXPORT_SYMBOL_GPL(of_pci_range_parser_init);

int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser,
				 struct device_node *node)
{
	return parser_init(parser, node, "dma-ranges");
}
EXPORT_SYMBOL_GPL(of_pci_dma_range_parser_init);

struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
					     struct of_pci_range *range)
{
	const int na = 3, ns = 2;

	if (!range)
		return NULL;

	if (!parser->range || parser->range + parser->np > parser->end)
		return NULL;

	range->pci_space = be32_to_cpup(parser->range);
	range->flags = of_bus_pci_get_flags(parser->range);
	range->pci_addr = of_read_number(parser->range + 1, ns);
	range->cpu_addr = of_translate_address(parser->node,
					       parser->range + na);
	range->size = of_read_number(parser->range + parser->pna + na, ns);

	parser->range += parser->np;

	/* Now consume following elements while they are contiguous */
	while (parser->range + parser->np <= parser->end) {
		u32 flags;
		u64 pci_addr, cpu_addr, size;

		flags = of_bus_pci_get_flags(parser->range);
		pci_addr = of_read_number(parser->range + 1, ns);
		cpu_addr = of_translate_address(parser->node,
						parser->range + na);
		size = of_read_number(parser->range + parser->pna + na, ns);

		if (flags != range->flags)
			break;
		if (pci_addr != range->pci_addr + range->size ||
		    cpu_addr != range->cpu_addr + range->size)
			break;

		range->size += size;
		parser->range += parser->np;
	}

	return range;
}
EXPORT_SYMBOL_GPL(of_pci_range_parser_one);

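/*
 * Example (sketch): walk a host bridge node's "ranges" property with the
 * parser above, using the for_each_of_pci_range() helper from
 * <linux/of_address.h>.
 *
 *	struct of_pci_range_parser parser;
 *	struct of_pci_range range;
 *
 *	if (of_pci_range_parser_init(&parser, np))
 *		return -ENOENT;
 *	for_each_of_pci_range(&parser, &range)
 *		pr_debug("PCI %#llx -> CPU %#llx, size %#llx\n",
 *			 range.pci_addr, range.cpu_addr, range.size);
 */
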
/*
 * of_pci_range_to_resource - Create a resource from an of_pci_range
 * @range:	the PCI range that describes the resource
 * @np:		device node the range belongs to
 * @res:	pointer to a valid resource that will be updated to
 *		reflect the values contained in the range.
 *
 * Returns -EINVAL if the range cannot be converted to a resource.
 *
 * Note that if the range is an IO range, the resource will be converted
 * using pci_address_to_pio() which can fail if it is called too early or
 * if the range cannot be matched to any host bridge IO space (our case here).
 * To guard against that we try to register the IO range first.
 * If that fails we know that pci_address_to_pio() will fail too.
 */
int of_pci_range_to_resource(struct of_pci_range *range,
			     struct device_node *np, struct resource *res)
{
	int err;

	res->flags = range->flags;
	res->parent = res->child = res->sibling = NULL;
	res->name = np->full_name;

	if (res->flags & IORESOURCE_IO) {
		unsigned long port;

		err = pci_register_io_range(range->cpu_addr, range->size);
		if (err)
			goto invalid_range;
		port = pci_address_to_pio(range->cpu_addr);
		if (port == (unsigned long)-1) {
			err = -EINVAL;
			goto invalid_range;
		}
		res->start = port;
	} else {
		if ((sizeof(resource_size_t) < 8) &&
		    upper_32_bits(range->cpu_addr)) {
			err = -EINVAL;
			goto invalid_range;
		}

		res->start = range->cpu_addr;
	}
	res->end = res->start + range->size - 1;
	return 0;

invalid_range:
	res->start = (resource_size_t)OF_BAD_ADDR;
	res->end = (resource_size_t)OF_BAD_ADDR;
	return err;
}
EXPORT_SYMBOL(of_pci_range_to_resource);
#endif /* CONFIG_PCI */

/*
 * ISA bus specific translator
 */

static int of_bus_isa_match(struct device_node *np)
{
	return !strcmp(np->name, "isa");
}

static void of_bus_isa_count_cells(struct device_node *child,
				   int *addrc, int *sizec)
{
	if (addrc)
		*addrc = 2;
	if (sizec)
		*sizec = 1;
}

static u64 of_bus_isa_map(__be32 *addr, const __be32 *range, int na, int ns,
			  int pna)
{
	u64 cp, s, da;

	/* Check address type match */
	if ((addr[0] ^ range[0]) & cpu_to_be32(1))
		return OF_BAD_ADDR;

	/* Read address values, skipping high cell */
	cp = of_read_number(range + 1, na - 1);
	s  = of_read_number(range + na + pna, ns);
	da = of_read_number(addr + 1, na - 1);

	pr_debug("ISA map, cp=%llx, s=%llx, da=%llx\n",
		 (unsigned long long)cp, (unsigned long long)s,
		 (unsigned long long)da);

	if (da < cp || da >= (cp + s))
		return OF_BAD_ADDR;
	return da - cp;
}

static int of_bus_isa_translate(__be32 *addr, u64 offset, int na)
{
	return of_bus_default_translate(addr + 1, offset, na - 1);
}

static unsigned int of_bus_isa_get_flags(const __be32 *addr)
{
	unsigned int flags = 0;
	u32 w = be32_to_cpup(addr);

	if (w & 1)
		flags |= IORESOURCE_IO;
	else
		flags |= IORESOURCE_MEM;
	return flags;
}

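/*
 * The ISA helpers above follow the ISA/LPC binding: addresses use two
 * cells, where bit 0 of the first cell selects I/O (1) vs. memory (0)
 * space and the second cell gives the offset within that space.  For
 * example (illustrative only), a legacy UART might be described as
 *
 *	reg = <1 0x3f8 0x8>;
 *
 * i.e. an 8-byte I/O range starting at port 0x3f8.
 */
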
/*
 * Array of bus specific translators
 */

static struct of_bus of_busses[] = {
#ifdef CONFIG_PCI
	/* PCI */
	{
		.name = "pci",
		.addresses = "assigned-addresses",
		.match = of_bus_pci_match,
		.count_cells = of_bus_pci_count_cells,
		.map = of_bus_pci_map,
		.translate = of_bus_pci_translate,
		.get_flags = of_bus_pci_get_flags,
	},
#endif /* CONFIG_PCI */
	/* ISA */
	{
		.name = "isa",
		.addresses = "reg",
		.match = of_bus_isa_match,
		.count_cells = of_bus_isa_count_cells,
		.map = of_bus_isa_map,
		.translate = of_bus_isa_translate,
		.get_flags = of_bus_isa_get_flags,
	},
	/* Default */
	{
		.name = "default",
		.addresses = "reg",
		.match = NULL,
		.count_cells = of_bus_default_count_cells,
		.map = of_bus_default_map,
		.translate = of_bus_default_translate,
		.get_flags = of_bus_default_get_flags,
	},
};

static struct of_bus *of_match_bus(struct device_node *np)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(of_busses); i++)
		if (!of_busses[i].match || of_busses[i].match(np))
			return &of_busses[i];
	BUG();
	return NULL;
}

static int of_empty_ranges_quirk(struct device_node *np)
{
	if (IS_ENABLED(CONFIG_PPC)) {
		/* To save cycles, we cache the result for global "Mac" setting */
		static int quirk_state = -1;

		/* PA-SEMI sdc DT bug */
		if (of_device_is_compatible(np, "1682m-sdc"))
			return true;

		/* Make quirk cached */
		if (quirk_state < 0)
			quirk_state =
				of_machine_is_compatible("Power Macintosh") ||
				of_machine_is_compatible("MacRISC");
		return quirk_state;
	}
	return false;
}

static int of_translate_one(struct device_node *parent, struct of_bus *bus,
			    struct of_bus *pbus, __be32 *addr,
			    int na, int ns, int pna, const char *rprop)
{
	const __be32 *ranges;
	unsigned int rlen;
	int rone;
	u64 offset = OF_BAD_ADDR;

	/*
	 * Normally, an absence of a "ranges" property means we are
	 * crossing a non-translatable boundary, and thus the addresses
	 * below the current node cannot be converted to CPU physical ones.
	 * Unfortunately, while this is very clear in the spec, it's not
	 * what Apple understood, and they do have things like /uni-n or
	 * /ht nodes with no "ranges" property and a lot of perfectly
	 * usable mapped devices below them. Thus we treat the absence of
	 * "ranges" as equivalent to an empty "ranges" property which means
	 * a 1:1 translation at that level. It's up to the caller not to try
	 * to translate addresses that aren't supposed to be translated in
	 * the first place. --BenH.
	 *
	 * As far as we know, this damage only exists on Apple machines, so
	 * this code is only enabled on powerpc. --gcl
	 */
	ranges = of_get_property(parent, rprop, &rlen);
	if (ranges == NULL && !of_empty_ranges_quirk(parent)) {
		pr_debug("no ranges; cannot translate\n");
		return 1;
	}
	if (ranges == NULL || rlen == 0) {
		offset = of_read_number(addr, na);
		memset(addr, 0, pna * 4);
		pr_debug("empty ranges; 1:1 translation\n");
		goto finish;
	}

	pr_debug("walking ranges...\n");

	/* Now walk through the ranges */
	rlen /= 4;
	rone = na + pna + ns;
	for (; rlen >= rone; rlen -= rone, ranges += rone) {
		offset = bus->map(addr, ranges, na, ns, pna);
		if (offset != OF_BAD_ADDR)
			break;
	}
	if (offset == OF_BAD_ADDR) {
		pr_debug("not found !\n");
		return 1;
	}
	memcpy(addr, ranges + na, 4 * pna);

finish:
	of_dump_addr("parent translation for:", addr, pna);
	pr_debug("with offset: %llx\n", (unsigned long long)offset);

	/* Translate it into parent bus space */
	return pbus->translate(addr, offset, pna);
}

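/*
 * For example (illustrative values), a child bus with #address-cells = <1>
 * and #size-cells = <1> under a parent with #address-cells = <1> might
 * carry
 *
 *	ranges = <0x0 0x40000000 0x100000>;
 *
 * With the default bus map above, a child address of 0x1000 falls inside
 * that (child, parent, size) triplet at offset 0x1000 and translates to
 * 0x40001000 in the parent address space.
 */
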
/*
 * Translate an address from the device tree into a CPU physical address.
 * This walks up the tree and applies the various bus mappings along the
 * way.
 *
 * Note: We consider crossing any level with #size-cells == 0 to mean that
 * translation is impossible (i.e. we are not dealing with a value that can
 * be mapped to a CPU physical address). This is not really specified that
 * way, but it is traditionally how IBM firmware behaves, at least.
 */
static u64 __of_translate_address(struct device_node *dev,
				  const __be32 *in_addr, const char *rprop)
{
	struct device_node *parent = NULL;
	struct of_bus *bus, *pbus;
	__be32 addr[OF_MAX_ADDR_CELLS];
	int na, ns, pna, pns;
	u64 result = OF_BAD_ADDR;

	pr_debug("** translation for device %pOF **\n", dev);

	/* Increase refcount at current level */
	of_node_get(dev);

	/* Get parent & match bus type */
	parent = of_get_parent(dev);
	if (parent == NULL)
		goto bail;
	bus = of_match_bus(parent);

	/* Count address cells & copy address locally */
	bus->count_cells(dev, &na, &ns);
	if (!OF_CHECK_COUNTS(na, ns)) {
		pr_debug("Bad cell count for %pOF\n", dev);
		goto bail;
	}
	memcpy(addr, in_addr, na * 4);

	pr_debug("bus is %s (na=%d, ns=%d) on %pOF\n",
		 bus->name, na, ns, parent);
	of_dump_addr("translating address:", addr, na);

	/* Translate */
	for (;;) {
		/* Switch to parent bus */
		of_node_put(dev);
		dev = parent;
		parent = of_get_parent(dev);

		/* If root, we have finished */
		if (parent == NULL) {
			pr_debug("reached root node\n");
			result = of_read_number(addr, na);
			break;
		}

		/* Get new parent bus and counts */
		pbus = of_match_bus(parent);
		pbus->count_cells(dev, &pna, &pns);
		if (!OF_CHECK_COUNTS(pna, pns)) {
			pr_err("Bad cell count for %pOF\n", dev);
			break;
		}

		pr_debug("parent bus is %s (na=%d, ns=%d) on %pOF\n",
			 pbus->name, pna, pns, parent);

		/* Apply bus translation */
		if (of_translate_one(dev, bus, pbus, addr, na, ns, pna, rprop))
			break;

		/* Complete the move up one level */
		na = pna;
		ns = pns;
		bus = pbus;

		of_dump_addr("one level translation:", addr, na);
	}
bail:
	of_node_put(parent);
	of_node_put(dev);

	return result;
}

u64 of_translate_address(struct device_node *dev, const __be32 *in_addr)
{
	return __of_translate_address(dev, in_addr, "ranges");
}
EXPORT_SYMBOL(of_translate_address);

u64 of_translate_dma_address(struct device_node *dev, const __be32 *in_addr)
{
	return __of_translate_address(dev, in_addr, "dma-ranges");
}
EXPORT_SYMBOL(of_translate_dma_address);

const __be32 *of_get_address(struct device_node *dev, int index, u64 *size,
			     unsigned int *flags)
{
	const __be32 *prop;
	unsigned int psize;
	struct device_node *parent;
	struct of_bus *bus;
	int onesize, i, na, ns;

	/* Get parent & match bus type */
	parent = of_get_parent(dev);
	if (parent == NULL)
		return NULL;
	bus = of_match_bus(parent);
	bus->count_cells(dev, &na, &ns);
	of_node_put(parent);
	if (!OF_CHECK_ADDR_COUNT(na))
		return NULL;

	/* Get "reg" or "assigned-addresses" property */
	prop = of_get_property(dev, bus->addresses, &psize);
	if (prop == NULL)
		return NULL;
	psize /= 4;

	onesize = na + ns;
	for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++)
		if (i == index) {
			if (size)
				*size = of_read_number(prop + na, ns);
			if (flags)
				*flags = bus->get_flags(prop);
			return prop;
		}
	return NULL;
}
EXPORT_SYMBOL(of_get_address);

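/*
 * Example (sketch): fetch the first "reg" entry of a node "np" and
 * translate it to a CPU physical address.
 *
 *	const __be32 *addr;
 *	u64 size, taddr;
 *
 *	addr = of_get_address(np, 0, &size, NULL);
 *	if (!addr)
 *		return -EINVAL;
 *	taddr = of_translate_address(np, addr);
 *	if (taddr == OF_BAD_ADDR)
 *		return -EINVAL;
 *
 * In most cases of_address_to_resource() below does exactly this and is
 * the more convenient interface.
 */
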
static int __of_address_to_resource(struct device_node *dev,
		const __be32 *addrp, u64 size, unsigned int flags,
		const char *name, struct resource *r)
{
	u64 taddr;

	if ((flags & (IORESOURCE_IO | IORESOURCE_MEM)) == 0)
		return -EINVAL;
	taddr = of_translate_address(dev, addrp);
	if (taddr == OF_BAD_ADDR)
		return -EINVAL;
	memset(r, 0, sizeof(struct resource));
	if (flags & IORESOURCE_IO) {
		unsigned long port;

		port = pci_address_to_pio(taddr);
		if (port == (unsigned long)-1)
			return -EINVAL;
		r->start = port;
		r->end = port + size - 1;
	} else {
		r->start = taddr;
		r->end = taddr + size - 1;
	}
	r->flags = flags;
	r->name = name ? name : dev->full_name;

	return 0;
}

/**
 * of_address_to_resource - Translate device tree address and return as resource
 *
 * Note that if your address is a PIO address, the conversion will fail if
 * the physical address can't be internally converted to an IO token with
 * pci_address_to_pio(); that happens when it is called too early or when
 * the address can't be matched to any host bridge IO space.
 */
int of_address_to_resource(struct device_node *dev, int index,
			   struct resource *r)
{
	const __be32 *addrp;
	u64 size;
	unsigned int flags;
	const char *name = NULL;

	addrp = of_get_address(dev, index, &size, &flags);
	if (addrp == NULL)
		return -EINVAL;

	/* Get optional "reg-names" property to add a name to a resource */
	of_property_read_string_index(dev, "reg-names", index, &name);

	return __of_address_to_resource(dev, addrp, size, flags, name, r);
}
EXPORT_SYMBOL_GPL(of_address_to_resource);

/*
 * Find the next node (starting at @from) that matches @matches and whose
 * first translated address equals @base_address.
 */
struct device_node *of_find_matching_node_by_address(struct device_node *from,
					const struct of_device_id *matches,
					u64 base_address)
{
	struct device_node *dn = of_find_matching_node(from, matches);
	struct resource res;

	while (dn) {
		if (!of_address_to_resource(dn, 0, &res) &&
		    res.start == base_address)
			return dn;

		dn = of_find_matching_node(dn, matches);
	}

	return NULL;
}

/**
 * of_iomap - Maps the memory mapped IO for a given device_node
 * @np:		the device whose io range will be mapped
 * @index:	index of the io range
 *
 * Returns a pointer to the mapped memory, or NULL on error.
 */
void __iomem *of_iomap(struct device_node *np, int index)
{
	struct resource res;

	if (of_address_to_resource(np, index, &res))
		return NULL;

	return ioremap(res.start, resource_size(&res));
}
EXPORT_SYMBOL(of_iomap);

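/*
 * Example (sketch):
 *
 *	void __iomem *base = of_iomap(np, 0);
 *
 *	if (!base)
 *		return -ENOMEM;
 *
 * Unlike of_io_request_and_map() below, failure is reported with NULL
 * rather than an ERR_PTR() value, and the region is not requested first.
 */
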
/**
 * of_io_request_and_map - Requests a resource and maps the memory mapped IO
 *			   for a given device_node
 * @np:		the device whose io range will be mapped
 * @index:	index of the io range
 * @name:	name of the resource
 *
 * Returns a pointer to the requested and mapped memory or an ERR_PTR() encoded
 * error code on failure. Usage example:
 *
 *	base = of_io_request_and_map(node, 0, "foo");
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */
void __iomem *of_io_request_and_map(struct device_node *np, int index,
				    const char *name)
{
	struct resource res;
	void __iomem *mem;

	if (of_address_to_resource(np, index, &res))
		return IOMEM_ERR_PTR(-EINVAL);

	if (!request_mem_region(res.start, resource_size(&res), name))
		return IOMEM_ERR_PTR(-EBUSY);

	mem = ioremap(res.start, resource_size(&res));
	if (!mem) {
		release_mem_region(res.start, resource_size(&res));
		return IOMEM_ERR_PTR(-ENOMEM);
	}

	return mem;
}
EXPORT_SYMBOL(of_io_request_and_map);

/**
 * of_dma_get_range - Get DMA range info
 * @np:		device node to get DMA range info
 * @dma_addr:	pointer to store initial DMA address of DMA range
 * @paddr:	pointer to store initial CPU address of DMA range
 * @size:	pointer to store size of DMA range
 *
 * Look in bottom up direction for the first "dma-ranges" property
 * and parse it.
 * dma-ranges format:
 *	DMA addr (dma_addr)	: naddr cells
 *	CPU addr (phys_addr_t)	: pna cells
 *	size			: nsize cells
 *
 * It returns -ENODEV if no "dma-ranges" property was found
 * for this device in the DT.
 */
int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *size)
{
	struct device_node *node = of_node_get(np);
	const __be32 *ranges = NULL;
	int len, naddr, nsize, pna;
	int ret = 0;
	u64 dmaaddr;

	if (!node)
		return -EINVAL;

	while (1) {
		naddr = of_n_addr_cells(node);
		nsize = of_n_size_cells(node);
		node = of_get_next_parent(node);
		if (!node)
			break;

		ranges = of_get_property(node, "dma-ranges", &len);

		/* Ignore empty ranges, they imply no translation required */
		if (ranges && len > 0)
			break;

		/*
		 * At least an empty "dma-ranges" has to be defined for the
		 * parent node if DMA is supported
		 */
		if (!ranges)
			break;
	}

	if (!ranges) {
		pr_debug("no dma-ranges found for node(%pOF)\n", np);
		ret = -ENODEV;
		goto out;
	}

	len /= sizeof(u32);

	pna = of_n_addr_cells(node);

	/*
	 * dma-ranges format:
	 *	DMA addr	: naddr cells
	 *	CPU addr	: pna cells
	 *	size		: nsize cells
	 */
	dmaaddr = of_read_number(ranges, naddr);
	*paddr = of_translate_dma_address(np, ranges);
	if (*paddr == OF_BAD_ADDR) {
		pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
		       (unsigned long long)dmaaddr, np);
		ret = -EINVAL;
		goto out;
	}
	*dma_addr = dmaaddr;

	*size = of_read_number(ranges + naddr + pna, nsize);

	pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
		 *dma_addr, *paddr, *size);

out:
	of_node_put(node);

	return ret;
}
EXPORT_SYMBOL_GPL(of_dma_get_range);

/**
 * of_dma_is_coherent - Check if device is coherent
 * @np:	device node
 *
 * It returns true if the "dma-coherent" property was found
 * for this device in the DT.
 */
bool of_dma_is_coherent(struct device_node *np)
{
	struct device_node *node = of_node_get(np);

	while (node) {
		if (of_property_read_bool(node, "dma-coherent")) {
			of_node_put(node);
			return true;
		}
		node = of_get_next_parent(node);
	}
	of_node_put(node);
	return false;
}
EXPORT_SYMBOL_GPL(of_dma_is_coherent);