// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt)	"OF: " fmt

#include <linux/device.h>
#include <linux/fwnode.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/logic_pio.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "of_private.h"

/* Max address size we deal with */
#define OF_MAX_ADDR_CELLS	4
#define OF_CHECK_ADDR_COUNT(na)	((na) > 0 && (na) <= OF_MAX_ADDR_CELLS)
#define OF_CHECK_COUNTS(na, ns)	(OF_CHECK_ADDR_COUNT(na) && (ns) > 0)

static struct of_bus *of_match_bus(struct device_node *np);
static int __of_address_to_resource(struct device_node *dev,
		const __be32 *addrp, u64 size, unsigned int flags,
		const char *name, struct resource *r);

/* Debug utility */
#ifdef DEBUG
static void of_dump_addr(const char *s, const __be32 *addr, int na)
{
	pr_debug("%s", s);
	while (na--)
		pr_cont(" %08x", be32_to_cpu(*(addr++)));
	pr_cont("\n");
}
#else
static void of_dump_addr(const char *s, const __be32 *addr, int na) { }
#endif

/* Callbacks for bus specific translators */
struct of_bus {
	const char	*name;
	const char	*addresses;
	int		(*match)(struct device_node *parent);
	void		(*count_cells)(struct device_node *child,
				       int *addrc, int *sizec);
	u64		(*map)(__be32 *addr, const __be32 *range,
			       int na, int ns, int pna);
	int		(*translate)(__be32 *addr, u64 offset, int na);
	bool		has_flags;
	unsigned int	(*get_flags)(const __be32 *addr);
};

/*
 * Default translator (generic bus)
 */

static void of_bus_default_count_cells(struct device_node *dev,
				       int *addrc, int *sizec)
{
	if (addrc)
		*addrc = of_n_addr_cells(dev);
	if (sizec)
		*sizec = of_n_size_cells(dev);
}

static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
		int na, int ns, int pna)
{
	u64 cp, s, da;

	cp = of_read_number(range, na);
	s = of_read_number(range + na + pna, ns);
	da = of_read_number(addr, na);

	pr_debug("default map, cp=%llx, s=%llx, da=%llx\n",
		 (unsigned long long)cp, (unsigned long long)s,
		 (unsigned long long)da);

	if (da < cp || da >= (cp + s))
		return OF_BAD_ADDR;
	return da - cp;
}

static int of_bus_default_translate(__be32 *addr, u64 offset, int na)
{
	u64 a = of_read_number(addr, na);
	memset(addr, 0, na * 4);
	a += offset;
	if (na > 1)
		addr[na - 2] = cpu_to_be32(a >> 32);
	addr[na - 1] = cpu_to_be32(a & 0xffffffffu);

	return 0;
}

static unsigned int of_bus_default_get_flags(const __be32 *addr)
{
	return IORESOURCE_MEM;
}
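
/*
 * Illustrative sketch (not taken from a real device tree): with a parent
 * node carrying
 *
 *	ranges = <0x0 0xe0000000 0x100000>;
 *
 * and na = ns = pna = 1, of_bus_default_map() checks that a child address
 * such as 0x1000 falls inside [0x0, 0x100000) and returns the offset
 * 0x1000.  The caller (of_translate_one() below) then copies the parent
 * side of the range (0xe0000000) into the address buffer and calls
 * of_bus_default_translate(), which adds the offset, yielding 0xe0001000.
 */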

#ifdef CONFIG_PCI
static unsigned int of_bus_pci_get_flags(const __be32 *addr)
{
	unsigned int flags = 0;
	u32 w = be32_to_cpup(addr);

	if (!IS_ENABLED(CONFIG_PCI))
		return 0;

	switch ((w >> 24) & 0x03) {
	case 0x01:
		flags |= IORESOURCE_IO;
		break;
	case 0x02: /* 32 bits */
	case 0x03: /* 64 bits */
		flags |= IORESOURCE_MEM;
		break;
	}
	if (w & 0x40000000)
		flags |= IORESOURCE_PREFETCH;
	return flags;
}

/*
 * PCI bus specific translator
 */

static int of_bus_pci_match(struct device_node *np)
{
	/*
	 * "pciex" is PCI Express
	 * "vci" is for the /chaos bridge on 1st-gen PCI powermacs
	 * "ht" is hypertransport
	 */
	return of_node_is_type(np, "pci") || of_node_is_type(np, "pciex") ||
	       of_node_is_type(np, "vci") || of_node_is_type(np, "ht");
}

static void of_bus_pci_count_cells(struct device_node *np,
				   int *addrc, int *sizec)
{
	if (addrc)
		*addrc = 3;
	if (sizec)
		*sizec = 2;
}

static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,
			  int pna)
{
	u64 cp, s, da;
	unsigned int af, rf;

	af = of_bus_pci_get_flags(addr);
	rf = of_bus_pci_get_flags(range);

	/* Check address type match */
	if ((af ^ rf) & (IORESOURCE_MEM | IORESOURCE_IO))
		return OF_BAD_ADDR;

	/* Read address values, skipping high cell */
	cp = of_read_number(range + 1, na - 1);
	s = of_read_number(range + na + pna, ns);
	da = of_read_number(addr + 1, na - 1);

	pr_debug("PCI map, cp=%llx, s=%llx, da=%llx\n",
		 (unsigned long long)cp, (unsigned long long)s,
		 (unsigned long long)da);

	if (da < cp || da >= (cp + s))
		return OF_BAD_ADDR;
	return da - cp;
}

static int of_bus_pci_translate(__be32 *addr, u64 offset, int na)
{
	return of_bus_default_translate(addr + 1, offset, na - 1);
}
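
/**
 * of_get_pci_address - Find a PCI device's address cells for a given BAR
 * @dev: PCI device node
 * @bar_no: index of the BAR to look up
 * @size: pointer to where the BAR size is stored, or NULL
 * @flags: pointer to where the IORESOURCE_* flags are stored, or NULL
 *
 * Walks the node's "assigned-addresses" property and returns a pointer to
 * the address cells of the entry whose register offset matches
 * PCI_BASE_ADDRESS_0 + 4 * @bar_no, or NULL if no such entry exists.
 */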
const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
				 unsigned int *flags)
{
	const __be32 *prop;
	unsigned int psize;
	struct device_node *parent;
	struct of_bus *bus;
	int onesize, i, na, ns;

	/* Get parent & match bus type */
	parent = of_get_parent(dev);
	if (parent == NULL)
		return NULL;
	bus = of_match_bus(parent);
	if (strcmp(bus->name, "pci")) {
		of_node_put(parent);
		return NULL;
	}
	bus->count_cells(dev, &na, &ns);
	of_node_put(parent);
	if (!OF_CHECK_ADDR_COUNT(na))
		return NULL;

	/* Get "reg" or "assigned-addresses" property */
	prop = of_get_property(dev, bus->addresses, &psize);
	if (prop == NULL)
		return NULL;
	psize /= 4;

	onesize = na + ns;
	for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++) {
		u32 val = be32_to_cpu(prop[0]);
		if ((val & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) {
			if (size)
				*size = of_read_number(prop + na, ns);
			if (flags)
				*flags = bus->get_flags(prop);
			return prop;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(of_get_pci_address);

int of_pci_address_to_resource(struct device_node *dev, int bar,
			       struct resource *r)
{
	const __be32 *addrp;
	u64 size;
	unsigned int flags;

	addrp = of_get_pci_address(dev, bar, &size, &flags);
	if (addrp == NULL)
		return -EINVAL;
	return __of_address_to_resource(dev, addrp, size, flags, NULL, r);
}
EXPORT_SYMBOL_GPL(of_pci_address_to_resource);

/*
 * of_pci_range_to_resource - Create a resource from an of_pci_range
 * @range:	the PCI range that describes the resource
 * @np:		device node where the range belongs to
 * @res:	pointer to a valid resource that will be updated to
 *		reflect the values contained in the range.
 *
 * Returns -EINVAL if the range cannot be converted to a resource.
 *
 * Note that if the range is an IO range, the resource will be converted
 * using pci_address_to_pio() which can fail if it is called too early or
 * if the range cannot be matched to any host bridge IO space (our case here).
 * To guard against that we try to register the IO range first.
 * If that fails we know that pci_address_to_pio() will do too.
 */
int of_pci_range_to_resource(struct of_pci_range *range,
			     struct device_node *np, struct resource *res)
{
	int err;
	res->flags = range->flags;
	res->parent = res->child = res->sibling = NULL;
	res->name = np->full_name;

	if (res->flags & IORESOURCE_IO) {
		unsigned long port;
		err = pci_register_io_range(&np->fwnode, range->cpu_addr,
					    range->size);
		if (err)
			goto invalid_range;
		port = pci_address_to_pio(range->cpu_addr);
		if (port == (unsigned long)-1) {
			err = -EINVAL;
			goto invalid_range;
		}
		res->start = port;
	} else {
		if ((sizeof(resource_size_t) < 8) &&
		    upper_32_bits(range->cpu_addr)) {
			err = -EINVAL;
			goto invalid_range;
		}

		res->start = range->cpu_addr;
	}
	res->end = res->start + range->size - 1;
	return 0;

invalid_range:
	res->start = (resource_size_t)OF_BAD_ADDR;
	res->end = (resource_size_t)OF_BAD_ADDR;
	return err;
}
EXPORT_SYMBOL(of_pci_range_to_resource);
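
/*
 * Minimal usage sketch (hypothetical host bridge driver, not part of this
 * file; "bridge_np" is the bridge's device node): parse the bridge's
 * "ranges" and turn each entry into a struct resource.
 *
 *	struct of_pci_range_parser parser;
 *	struct of_pci_range range;
 *	struct resource res;
 *
 *	if (of_pci_range_parser_init(&parser, bridge_np))
 *		return -ENOENT;
 *	for_each_of_pci_range(&parser, &range) {
 *		if (of_pci_range_to_resource(&range, bridge_np, &res))
 *			continue;
 *		// res now describes one window of the bridge
 *	}
 */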
#endif /* CONFIG_PCI */

/*
 * ISA bus specific translator
 */

static int of_bus_isa_match(struct device_node *np)
{
	return of_node_name_eq(np, "isa");
}

static void of_bus_isa_count_cells(struct device_node *child,
				   int *addrc, int *sizec)
{
	if (addrc)
		*addrc = 2;
	if (sizec)
		*sizec = 1;
}

static u64 of_bus_isa_map(__be32 *addr, const __be32 *range, int na, int ns,
			  int pna)
{
	u64 cp, s, da;

	/* Check address type match */
	if ((addr[0] ^ range[0]) & cpu_to_be32(1))
		return OF_BAD_ADDR;

	/* Read address values, skipping high cell */
	cp = of_read_number(range + 1, na - 1);
	s = of_read_number(range + na + pna, ns);
	da = of_read_number(addr + 1, na - 1);

	pr_debug("ISA map, cp=%llx, s=%llx, da=%llx\n",
		 (unsigned long long)cp, (unsigned long long)s,
		 (unsigned long long)da);

	if (da < cp || da >= (cp + s))
		return OF_BAD_ADDR;
	return da - cp;
}

static int of_bus_isa_translate(__be32 *addr, u64 offset, int na)
{
	return of_bus_default_translate(addr + 1, offset, na - 1);
}

static unsigned int of_bus_isa_get_flags(const __be32 *addr)
{
	unsigned int flags = 0;
	u32 w = be32_to_cpup(addr);

	if (w & 1)
		flags |= IORESOURCE_IO;
	else
		flags |= IORESOURCE_MEM;
	return flags;
}

/*
 * Array of bus specific translators
 */

static struct of_bus of_busses[] = {
#ifdef CONFIG_PCI
	/* PCI */
	{
		.name = "pci",
		.addresses = "assigned-addresses",
		.match = of_bus_pci_match,
		.count_cells = of_bus_pci_count_cells,
		.map = of_bus_pci_map,
		.translate = of_bus_pci_translate,
		.has_flags = true,
		.get_flags = of_bus_pci_get_flags,
	},
#endif /* CONFIG_PCI */
	/* ISA */
	{
		.name = "isa",
		.addresses = "reg",
		.match = of_bus_isa_match,
		.count_cells = of_bus_isa_count_cells,
		.map = of_bus_isa_map,
		.translate = of_bus_isa_translate,
		.has_flags = true,
		.get_flags = of_bus_isa_get_flags,
	},
	/* Default */
	{
		.name = "default",
		.addresses = "reg",
		.match = NULL,
		.count_cells = of_bus_default_count_cells,
		.map = of_bus_default_map,
		.translate = of_bus_default_translate,
		.get_flags = of_bus_default_get_flags,
	},
};

static struct of_bus *of_match_bus(struct device_node *np)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(of_busses); i++)
		if (!of_busses[i].match || of_busses[i].match(np))
			return &of_busses[i];
	BUG();
	return NULL;
}

static int of_empty_ranges_quirk(struct device_node *np)
{
	if (IS_ENABLED(CONFIG_PPC)) {
		/* To save cycles, we cache the result for global "Mac" setting */
		static int quirk_state = -1;

		/* PA-SEMI sdc DT bug */
		if (of_device_is_compatible(np, "1682m-sdc"))
			return true;

		/* Make quirk cached */
		if (quirk_state < 0)
			quirk_state =
				of_machine_is_compatible("Power Macintosh") ||
				of_machine_is_compatible("MacRISC");
		return quirk_state;
	}
	return false;
}
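
/*
 * of_translate_one() applies one level of translation: it maps @addr,
 * expressed in @parent's child address space (@na cells, bus type @bus),
 * into the address space of @parent's own parent (@pna cells, bus type
 * @pbus) by walking @parent's @rprop property ("ranges" or "dma-ranges").
 * On success it rewrites @addr in place and returns 0; it returns non-zero
 * if no matching range is found.
 */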
static int of_translate_one(struct device_node *parent, struct of_bus *bus,
			    struct of_bus *pbus, __be32 *addr,
			    int na, int ns, int pna, const char *rprop)
{
	const __be32 *ranges;
	unsigned int rlen;
	int rone;
	u64 offset = OF_BAD_ADDR;

	/*
	 * Normally, an absence of a "ranges" property means we are
	 * crossing a non-translatable boundary, and thus the addresses
	 * below the current cannot be converted to CPU physical ones.
	 * Unfortunately, while this is very clear in the spec, it's not
	 * what Apple understood, and they do have things like /uni-n or
	 * /ht nodes with no "ranges" property and a lot of perfectly
	 * usable mapped devices below them. Thus we treat the absence of
	 * "ranges" as equivalent to an empty "ranges" property which means
	 * a 1:1 translation at that level. It's up to the caller not to try
	 * to translate addresses that aren't supposed to be translated in
	 * the first place. --BenH.
	 *
	 * As far as we know, this damage only exists on Apple machines, so
	 * this code is only enabled on powerpc. --gcl
	 *
	 * This quirk also applies for 'dma-ranges' which frequently exist in
	 * child nodes without 'dma-ranges' in the parent nodes. --RobH
	 */
	ranges = of_get_property(parent, rprop, &rlen);
	if (ranges == NULL && !of_empty_ranges_quirk(parent) &&
	    strcmp(rprop, "dma-ranges")) {
		pr_debug("no ranges; cannot translate\n");
		return 1;
	}
	if (ranges == NULL || rlen == 0) {
		offset = of_read_number(addr, na);
		memset(addr, 0, pna * 4);
		pr_debug("empty ranges; 1:1 translation\n");
		goto finish;
	}

	pr_debug("walking ranges...\n");

	/* Now walk through the ranges */
	rlen /= 4;
	rone = na + pna + ns;
	for (; rlen >= rone; rlen -= rone, ranges += rone) {
		offset = bus->map(addr, ranges, na, ns, pna);
		if (offset != OF_BAD_ADDR)
			break;
	}
	if (offset == OF_BAD_ADDR) {
		pr_debug("not found !\n");
		return 1;
	}
	memcpy(addr, ranges + na, 4 * pna);

finish:
	of_dump_addr("parent translation for:", addr, pna);
	pr_debug("with offset: %llx\n", (unsigned long long)offset);

	/* Translate it into parent bus space */
	return pbus->translate(addr, offset, pna);
}

/*
 * Translate an address from the device-tree into a CPU physical address,
 * this walks up the tree and applies the various bus mappings on the
 * way.
 *
 * Note: We consider crossing any level with #size-cells == 0 to mean
 * that translation is impossible (that is, we are not dealing with a value
 * that can be mapped to a CPU physical address). This is not really
 * specified that way, but this is traditionally the way IBM, at least,
 * does things.
 *
 * If the translation stops at a node that has registered a logical PIO
 * range (indirect IO), the *host pointer is set to that node and the
 * returned address is relative to it.
 */
static u64 __of_translate_address(struct device_node *dev,
				  struct device_node *(*get_parent)(const struct device_node *),
				  const __be32 *in_addr, const char *rprop,
				  struct device_node **host)
{
	struct device_node *parent = NULL;
	struct of_bus *bus, *pbus;
	__be32 addr[OF_MAX_ADDR_CELLS];
	int na, ns, pna, pns;
	u64 result = OF_BAD_ADDR;

	pr_debug("** translation for device %pOF **\n", dev);

	/* Increase refcount at current level */
	of_node_get(dev);

	*host = NULL;
	/* Get parent & match bus type */
	parent = get_parent(dev);
	if (parent == NULL)
		goto bail;
	bus = of_match_bus(parent);

	/* Count address cells & copy address locally */
	bus->count_cells(dev, &na, &ns);
	if (!OF_CHECK_COUNTS(na, ns)) {
		pr_debug("Bad cell count for %pOF\n", dev);
		goto bail;
	}
	memcpy(addr, in_addr, na * 4);

	pr_debug("bus is %s (na=%d, ns=%d) on %pOF\n",
		 bus->name, na, ns, parent);
	of_dump_addr("translating address:", addr, na);

	/* Translate */
	for (;;) {
		struct logic_pio_hwaddr *iorange;

		/* Switch to parent bus */
		of_node_put(dev);
		dev = parent;
		parent = get_parent(dev);

		/* If root, we have finished */
		if (parent == NULL) {
			pr_debug("reached root node\n");
			result = of_read_number(addr, na);
			break;
		}

		/*
		 * For indirectIO device which has no ranges property, get
		 * the address from reg directly.
		 */
		iorange = find_io_range_by_fwnode(&dev->fwnode);
		if (iorange && (iorange->flags != LOGIC_PIO_CPU_MMIO)) {
			result = of_read_number(addr + 1, na - 1);
			pr_debug("indirectIO matched(%pOF) 0x%llx\n",
				 dev, result);
			*host = of_node_get(dev);
			break;
		}

		/* Get new parent bus and counts */
		pbus = of_match_bus(parent);
		pbus->count_cells(dev, &pna, &pns);
		if (!OF_CHECK_COUNTS(pna, pns)) {
			pr_err("Bad cell count for %pOF\n", dev);
			break;
		}

		pr_debug("parent bus is %s (na=%d, ns=%d) on %pOF\n",
			 pbus->name, pna, pns, parent);

		/* Apply bus translation */
		if (of_translate_one(dev, bus, pbus, addr, na, ns, pna, rprop))
			break;

		/* Complete the move up one level */
		na = pna;
		ns = pns;
		bus = pbus;

		of_dump_addr("one level translation:", addr, na);
	}
bail:
	of_node_put(parent);
	of_node_put(dev);

	return result;
}
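
/**
 * of_translate_address - Translate a device-tree address to a CPU physical address
 * @dev: device node that carries the address
 * @in_addr: pointer to the address cells to translate
 *
 * Walks up the tree from @dev, applying each parent bus's "ranges"
 * translation.  Returns the CPU physical address, or OF_BAD_ADDR if the
 * address cannot be translated (including when it resolves to an
 * indirect-IO host rather than CPU memory space).
 */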
u64 of_translate_address(struct device_node *dev, const __be32 *in_addr)
{
	struct device_node *host;
	u64 ret;

	ret = __of_translate_address(dev, of_get_parent,
				     in_addr, "ranges", &host);
	if (host) {
		of_node_put(host);
		return OF_BAD_ADDR;
	}

	return ret;
}
EXPORT_SYMBOL(of_translate_address);

static struct device_node *__of_get_dma_parent(const struct device_node *np)
{
	struct of_phandle_args args;
	int ret, index;

	index = of_property_match_string(np, "interconnect-names", "dma-mem");
	if (index < 0)
		return of_get_parent(np);

	ret = of_parse_phandle_with_args(np, "interconnects",
					 "#interconnect-cells",
					 index, &args);
	if (ret < 0)
		return of_get_parent(np);

	return of_node_get(args.np);
}

static struct device_node *of_get_next_dma_parent(struct device_node *np)
{
	struct device_node *parent;

	parent = __of_get_dma_parent(np);
	of_node_put(np);

	return parent;
}

u64 of_translate_dma_address(struct device_node *dev, const __be32 *in_addr)
{
	struct device_node *host;
	u64 ret;

	ret = __of_translate_address(dev, __of_get_dma_parent,
				     in_addr, "dma-ranges", &host);

	if (host) {
		of_node_put(host);
		return OF_BAD_ADDR;
	}

	return ret;
}
EXPORT_SYMBOL(of_translate_dma_address);
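
/**
 * of_get_address - Return one address/size pair of a device node
 * @dev: device node to read from
 * @index: index of the entry in the bus-specific addresses property
 * @size: pointer to where the entry's size is stored, or NULL
 * @flags: pointer to where the IORESOURCE_* flags are stored, or NULL
 *
 * Returns a pointer to the raw, untranslated address cells of entry @index
 * in the node's "reg" (or, for PCI, "assigned-addresses") property, or
 * NULL if the entry does not exist.
 */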
const __be32 *of_get_address(struct device_node *dev, int index, u64 *size,
			     unsigned int *flags)
{
	const __be32 *prop;
	unsigned int psize;
	struct device_node *parent;
	struct of_bus *bus;
	int onesize, i, na, ns;

	/* Get parent & match bus type */
	parent = of_get_parent(dev);
	if (parent == NULL)
		return NULL;
	bus = of_match_bus(parent);
	bus->count_cells(dev, &na, &ns);
	of_node_put(parent);
	if (!OF_CHECK_ADDR_COUNT(na))
		return NULL;

	/* Get "reg" or "assigned-addresses" property */
	prop = of_get_property(dev, bus->addresses, &psize);
	if (prop == NULL)
		return NULL;
	psize /= 4;

	onesize = na + ns;
	for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++)
		if (i == index) {
			if (size)
				*size = of_read_number(prop + na, ns);
			if (flags)
				*flags = bus->get_flags(prop);
			return prop;
		}
	return NULL;
}
EXPORT_SYMBOL(of_get_address);

static int parser_init(struct of_pci_range_parser *parser,
		       struct device_node *node, const char *name)
{
	int rlen;

	parser->node = node;
	parser->pna = of_n_addr_cells(node);
	parser->na = of_bus_n_addr_cells(node);
	parser->ns = of_bus_n_size_cells(node);
	parser->dma = !strcmp(name, "dma-ranges");
	parser->bus = of_match_bus(node);

	parser->range = of_get_property(node, name, &rlen);
	if (parser->range == NULL)
		return -ENOENT;

	parser->end = parser->range + rlen / sizeof(__be32);

	return 0;
}

int of_pci_range_parser_init(struct of_pci_range_parser *parser,
			     struct device_node *node)
{
	return parser_init(parser, node, "ranges");
}
EXPORT_SYMBOL_GPL(of_pci_range_parser_init);

int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser,
				 struct device_node *node)
{
	return parser_init(parser, node, "dma-ranges");
}
EXPORT_SYMBOL_GPL(of_pci_dma_range_parser_init);
#define of_dma_range_parser_init of_pci_dma_range_parser_init
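
/**
 * of_pci_range_parser_one - Parse the next range entry
 * @parser: parser state initialized by of_pci_range_parser_init()
 * @range: of_pci_range to fill in
 *
 * Reads the next entry of the parsed "ranges"/"dma-ranges" property into
 * @range, translating the CPU side of the entry, and coalesces following
 * entries that are contiguous in both bus and CPU space and carry the same
 * flags.  Returns @range, or NULL when no entries are left.
 */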
struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
					     struct of_pci_range *range)
{
	int na = parser->na;
	int ns = parser->ns;
	int np = parser->pna + na + ns;
	int busflag_na = 0;

	if (!range)
		return NULL;

	if (!parser->range || parser->range + np > parser->end)
		return NULL;

	range->flags = parser->bus->get_flags(parser->range);

	/* An extra cell for resource flags */
	if (parser->bus->has_flags)
		busflag_na = 1;

	range->bus_addr = of_read_number(parser->range + busflag_na, na - busflag_na);

	if (parser->dma)
		range->cpu_addr = of_translate_dma_address(parser->node,
							   parser->range + na);
	else
		range->cpu_addr = of_translate_address(parser->node,
						       parser->range + na);
	range->size = of_read_number(parser->range + parser->pna + na, ns);

	parser->range += np;

	/* Now consume following elements while they are contiguous */
	while (parser->range + np <= parser->end) {
		u32 flags = 0;
		u64 bus_addr, cpu_addr, size;

		flags = parser->bus->get_flags(parser->range);
		bus_addr = of_read_number(parser->range + busflag_na, na - busflag_na);
		if (parser->dma)
			cpu_addr = of_translate_dma_address(parser->node,
							    parser->range + na);
		else
			cpu_addr = of_translate_address(parser->node,
							parser->range + na);
		size = of_read_number(parser->range + parser->pna + na, ns);

		if (flags != range->flags)
			break;
		if (bus_addr != range->bus_addr + range->size ||
		    cpu_addr != range->cpu_addr + range->size)
			break;

		range->size += size;
		parser->range += np;
	}

	return range;
}
EXPORT_SYMBOL_GPL(of_pci_range_parser_one);

static u64 of_translate_ioport(struct device_node *dev, const __be32 *in_addr,
			       u64 size)
{
	u64 taddr;
	unsigned long port;
	struct device_node *host;

	taddr = __of_translate_address(dev, of_get_parent,
				       in_addr, "ranges", &host);
	if (host) {
		/* host-specific port access */
		port = logic_pio_trans_hwaddr(&host->fwnode, taddr, size);
		of_node_put(host);
	} else {
		/* memory-mapped I/O range */
		port = pci_address_to_pio(taddr);
	}

	if (port == (unsigned long)-1)
		return OF_BAD_ADDR;

	return port;
}

static int __of_address_to_resource(struct device_node *dev,
		const __be32 *addrp, u64 size, unsigned int flags,
		const char *name, struct resource *r)
{
	u64 taddr;

	if (flags & IORESOURCE_MEM)
		taddr = of_translate_address(dev, addrp);
	else if (flags & IORESOURCE_IO)
		taddr = of_translate_ioport(dev, addrp, size);
	else
		return -EINVAL;

	if (taddr == OF_BAD_ADDR)
		return -EINVAL;
	memset(r, 0, sizeof(struct resource));

	r->start = taddr;
	r->end = taddr + size - 1;
	r->flags = flags;
	r->name = name ? name : dev->full_name;

	return 0;
}

/**
 * of_address_to_resource - Translate device tree address and return as resource
 * @dev: device node to look up
 * @index: index of the "reg" entry to translate
 * @r: resource filled in with the translated address, size and flags
 *
 * Note that if your address is a PIO address, the conversion will fail if
 * the physical address can't be internally converted to an IO token with
 * pci_address_to_pio(), that is because it's either called too early or it
 * can't be matched to any host bridge IO space.
 *
 * Returns 0 on success or -EINVAL if the address cannot be translated.
 */
int of_address_to_resource(struct device_node *dev, int index,
			   struct resource *r)
{
	const __be32 *addrp;
	u64 size;
	unsigned int flags;
	const char *name = NULL;

	addrp = of_get_address(dev, index, &size, &flags);
	if (addrp == NULL)
		return -EINVAL;

	/* Get optional "reg-names" property to add a name to a resource */
	of_property_read_string_index(dev, "reg-names", index, &name);

	return __of_address_to_resource(dev, addrp, size, flags, name, r);
}
EXPORT_SYMBOL_GPL(of_address_to_resource);
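
/*
 * Typical use from a driver probe path (sketch; "np" stands for the
 * driver's device node and error handling is abbreviated):
 *
 *	struct resource res;
 *	void __iomem *base;
 *	int ret;
 *
 *	ret = of_address_to_resource(np, 0, &res);
 *	if (ret)
 *		return ret;
 *	base = ioremap(res.start, resource_size(&res));
 *	if (!base)
 *		return -ENOMEM;
 *
 * of_iomap() below combines these two steps; of_io_request_and_map()
 * additionally reserves the region with request_mem_region().
 */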
/**
 * of_iomap - Maps the memory mapped IO for a given device_node
 * @np:		the device whose io range will be mapped
 * @index:	index of the io range
 *
 * Returns a pointer to the mapped memory, or NULL on failure.
 */
void __iomem *of_iomap(struct device_node *np, int index)
{
	struct resource res;

	if (of_address_to_resource(np, index, &res))
		return NULL;

	return ioremap(res.start, resource_size(&res));
}
EXPORT_SYMBOL(of_iomap);

/*
 * of_io_request_and_map - Requests a resource and maps the memory mapped IO
 *			   for a given device_node
 * @np:		the device whose io range will be mapped
 * @index:	index of the io range
 * @name:	name "override" for the memory region request or NULL
 *
 * Returns a pointer to the requested and mapped memory or an ERR_PTR() encoded
 * error code on failure. Usage example:
 *
 *	base = of_io_request_and_map(node, 0, "foo");
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */
void __iomem *of_io_request_and_map(struct device_node *np, int index,
				    const char *name)
{
	struct resource res;
	void __iomem *mem;

	if (of_address_to_resource(np, index, &res))
		return IOMEM_ERR_PTR(-EINVAL);

	if (!name)
		name = res.name;
	if (!request_mem_region(res.start, resource_size(&res), name))
		return IOMEM_ERR_PTR(-EBUSY);

	mem = ioremap(res.start, resource_size(&res));
	if (!mem) {
		release_mem_region(res.start, resource_size(&res));
		return IOMEM_ERR_PTR(-ENOMEM);
	}

	return mem;
}
EXPORT_SYMBOL(of_io_request_and_map);

/**
 * of_dma_get_range - Get DMA range info
 * @np:		device node to get DMA range info
 * @dma_addr:	pointer to store initial DMA address of DMA range
 * @paddr:	pointer to store initial CPU address of DMA range
 * @size:	pointer to store size of DMA range
 *
 * Look in bottom up direction for the first "dma-ranges" property
 * and parse it.
 * dma-ranges format:
 *	DMA addr (dma_addr)	: naddr cells
 *	CPU addr (phys_addr_t)	: pna cells
 *	size			: nsize cells
 *
 * It returns -ENODEV if the "dma-ranges" property was not found
 * for this device in the DT.
 */
int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *size)
{
	struct device_node *node = of_node_get(np);
	const __be32 *ranges = NULL;
	int len;
	int ret = 0;
	bool found_dma_ranges = false;
	struct of_range_parser parser;
	struct of_range range;
	u64 dma_start = U64_MAX, dma_end = 0, dma_offset = 0;

	while (node) {
		ranges = of_get_property(node, "dma-ranges", &len);

		/* Ignore empty ranges, they imply no translation required */
		if (ranges && len > 0)
			break;

		/* Once we find 'dma-ranges', then a missing one is an error */
		if (found_dma_ranges && !ranges) {
			ret = -ENODEV;
			goto out;
		}
		found_dma_ranges = true;

		node = of_get_next_dma_parent(node);
	}

	if (!node || !ranges) {
		pr_debug("no dma-ranges found for node(%pOF)\n", np);
		ret = -ENODEV;
		goto out;
	}

	of_dma_range_parser_init(&parser, node);

	for_each_of_range(&parser, &range) {
		pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
			 range.bus_addr, range.cpu_addr, range.size);

		if (dma_offset && range.cpu_addr - range.bus_addr != dma_offset) {
			pr_warn("Can't handle multiple dma-ranges with different offsets on node(%pOF)\n", node);
			/* Don't error out as we'd break some existing DTs */
			continue;
		}
		dma_offset = range.cpu_addr - range.bus_addr;

		/* Take lower and upper limits */
		if (range.bus_addr < dma_start)
			dma_start = range.bus_addr;
		if (range.bus_addr + range.size > dma_end)
			dma_end = range.bus_addr + range.size;
	}

	if (dma_start >= dma_end) {
		ret = -EINVAL;
		pr_debug("Invalid DMA ranges configuration on node(%pOF)\n",
			 node);
		goto out;
	}

	*dma_addr = dma_start;
	*size = dma_end - dma_start;
	*paddr = dma_start + dma_offset;

	pr_debug("final: dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
		 *dma_addr, *paddr, *size);

out:
	of_node_put(node);

	return ret;
}

/**
 * of_dma_is_coherent - Check if device is coherent
 * @np:	device node
 *
 * It returns true if "dma-coherent" property was found
 * for this device in the DT, or if DMA is coherent by
 * default for OF devices on the current platform.
 */
bool of_dma_is_coherent(struct device_node *np)
{
	struct device_node *node;

	if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT))
		return true;

	node = of_node_get(np);
	while (node) {
		if (of_property_read_bool(node, "dma-coherent")) {
			of_node_put(node);
			return true;
		}
		node = of_get_next_dma_parent(node);
	}
	of_node_put(node);
	return false;
}
EXPORT_SYMBOL_GPL(of_dma_is_coherent);